hash (string, 64 chars) | content (string, up to 1.51M chars)
---|---|
e2fc8de3f02ab54c8d39945e7fec6eca5398d60123ab5b4c79b7e02099563cb6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree # noqa: E402
from astropy import units as u # noqa: E402
from astropy.time import Time, TimeDelta # noqa: E402
@pytest.mark.parametrize("fmt", TimeDelta.FORMATS.keys())
def test_timedelta(fmt, tmpdir):
t1 = Time(Time.now())
t2 = Time(Time.now())
td = TimeDelta(t2 - t1, format=fmt)
tree = dict(timedelta=td)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("scale", list(TimeDelta.SCALES) + [None])
def test_timedelta_scales(scale, tmpdir):
tree = dict(timedelta=TimeDelta(0.125, scale=scale, format="jd"))
assert_roundtrip_tree(tree, tmpdir)
def test_timedelta_vector(tmpdir):
tree = dict(timedelta=TimeDelta([1, 2] * u.day))
assert_roundtrip_tree(tree, tmpdir)
|
898a27d86757e70daacd177486613d75d805e4eb113ee917c8500e0de9140cde | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import datetime # noqa: E402
import asdf.schema as asdf_schema # noqa: E402
import numpy as np # noqa: E402
from asdf import AsdfFile, tagged, yamlutil # noqa: E402
from asdf.tests import helpers # noqa: E402
from astropy import time # noqa: E402
def _flatten_combiners(schema):
newschema = dict()
def add_entry(path, schema, combiner):
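# Walk ``path`` into ``newschema``, creating the intermediate
# "properties"/"items" containers as needed, then merge the given
# sub-schema into the node reached at the end of the path.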
# TODO: Simplify?
cursor = newschema
for i in range(len(path)):
part = path[i]
if isinstance(part, int):
cursor = cursor.setdefault("items", [])
while len(cursor) <= part:
cursor.append({})
cursor = cursor[part]
elif part == "items":
cursor = cursor.setdefault("items", dict())
else:
cursor = cursor.setdefault("properties", dict())
if i < len(path) - 1 and isinstance(path[i + 1], int):
cursor = cursor.setdefault(part, [])
else:
cursor = cursor.setdefault(part, dict())
cursor.update(schema)
def test_time(tmpdir):
time_array = time.Time(np.arange(100), format="unix")
tree = {"large_time_array": time_array}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location(tmpdir):
# See https://github.com/spacetelescope/asdf/issues/341
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = time.Time([1, 2], location=location, format="cxcsec")
tree = {"time": t}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location_1_0_0(tmpdir):
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=6378100 * u.m, y=0 * u.m, z=0 * u.m)
t = time.Time("J2000.000", location=location, format="jyear_str")
tree = {"time": t}
# The version refers to ASDF Standard 1.0.0, which includes time-1.0.0
helpers.assert_roundtrip_tree(tree, tmpdir, init_options={"version": "1.0.0"})
def test_isot(tmpdir):
isot = time.Time("2000-01-01T00:00:00.000")
tree = {"time": isot}
helpers.assert_roundtrip_tree(tree, tmpdir)
ff = asdf.AsdfFile(tree)
tree = yamlutil.custom_tree_to_tagged_tree(ff.tree, ff)
if isinstance(tree["time"], str):
assert str(tree["time"]) == isot.value
elif isinstance(tree["time"], dict):
assert str(tree["time"]["value"]) == isot.value
assert str(tree["time"]["base_format"]) == "isot"
else:
assert False
def test_isot_array(tmpdir):
tree = {"time": time.Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"])}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_tag():
schema = asdf_schema.load_schema(
"http://stsci.edu/schemas/asdf/time/time-1.1.0", resolve_references=True
)
schema = _flatten_combiners(schema)
date = time.Time(datetime.datetime.now())
tree = {"date": date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree["date"], asdf)
asdf_schema.validate(instance, schema=schema)
tag = "tag:stsci.edu:asdf/time/time-1.1.0"
date = tagged.tag_object(tag, date)
tree = {"date": date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree["date"], asdf)
asdf_schema.validate(instance, schema=schema)
|
3ea7f405793c6941bac7917b058a4edde486321b729fd82ec05602aeaef9d6aa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree # noqa: E402
from astropy import units # noqa: E402
from astropy.coordinates import FK5, ICRS, Angle, Latitude, Longitude # noqa: E402
def test_hcrs_basic(tmpdir):
ra = Longitude(25, unit=units.deg)
dec = Latitude(45, unit=units.deg)
tree = {"coord": ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_basic(tmpdir):
wrap_angle = Angle(1.5, unit=units.rad)
ra = Longitude(25, unit=units.deg, wrap_angle=wrap_angle)
dec = Latitude(45, unit=units.deg)
tree = {"coord": ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_nodata(tmpdir):
tree = {"coord": ICRS()}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_compound(tmpdir):
icrs = ICRS(ra=[0, 1, 2] * units.deg, dec=[3, 4, 5] * units.deg)
tree = {"coord": icrs}
assert_roundtrip_tree(tree, tmpdir)
def test_fk5_time(tmpdir):
tree = {"coord": FK5(equinox="2011-01-01T00:00:00")}
assert_roundtrip_tree(tree, tmpdir)
|
83916704a87acb2229af8f179c266880c5224b8a5a6a9ec27a16c5b3e851d922 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree # noqa: E402
from astropy import units as u # noqa: E402
from astropy.coordinates.angles import Latitude, Longitude # noqa: E402
from astropy.coordinates.earth import ELLIPSOIDS, EarthLocation # noqa: E402
@pytest.fixture
def position():
lon = Longitude(
[0.0, 45.0, 90.0, 135.0, 180.0, -180, -90, -45], u.deg, wrap_angle=180 * u.deg
)
lat = Latitude([+0.0, 30.0, 60.0, +90.0, -90.0, -60.0, -30.0, 0.0], u.deg)
h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11.0, -0.1], u.m)
return lon, lat, h
def test_earthlocation_quantity(tmpdir):
location = EarthLocation(
lat=34.4900 * u.deg, lon=-104.221800 * u.deg, height=40 * u.km
)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation(position, tmpdir):
x, y, z = EarthLocation.from_geodetic(*position).to_geocentric()
geocentric = EarthLocation(x, y, z)
tree = dict(location=geocentric)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("ellipsoid", ELLIPSOIDS)
def test_earthlocation_geodetic(position, ellipsoid, tmpdir):
location = EarthLocation.from_geodetic(*position, ellipsoid=ellipsoid)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation_site(tmpdir):
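# Swap in the builtin (offline) site registry for the duration of the test,
# then restore whatever registry was present before.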
orig_sites = getattr(EarthLocation, "_site_registry", None)
try:
EarthLocation._get_site_registry(force_builtin=True)
rog = EarthLocation.of_site("greenwich")
tree = dict(location=rog)
assert_roundtrip_tree(tree, tmpdir)
finally:
EarthLocation._site_registry = orig_sites
|
aa0e27ce5a4e95f4993116f9f2f58b5ac4b72d9fd5377c874fdac0e5be6d30ee | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import FK4, ICRS, Galactic, Longitude, SkyCoord
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree # noqa: E402
# These tests are cribbed directly from the Examples section of
# https://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html
def test_scalar_skycoord(tmpdir):
c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_vector_skycoord(tmpdir):
c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_fk4(tmpdir):
coords = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
c = SkyCoord(coords, frame=FK4, unit=(u.deg, u.hourangle), obstime="J1992.21")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize(
"coord",
[
SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic), # Units from string
SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s"),
],
)
def test_skycoord_galactic(coord, tmpdir):
tree = dict(coord=coord)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_ra_dec(tmpdir):
ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
c = SkyCoord(ra, dec, frame="icrs")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime="2001-01-02T12:34:56")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_override_defaults(tmpdir):
c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
c = SkyCoord(c, obstime="J2010.11", equinox="B1965") # Override defaults
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_cartesian(tmpdir):
c = SkyCoord(
w=0, u=1, v=2, unit="kpc", frame="galactic", representation_type="cartesian"
)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_vector_frames(tmpdir):
c = SkyCoord([ICRS(ra=1 * u.deg, dec=2 * u.deg), ICRS(ra=3 * u.deg, dec=4 * u.deg)])
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason="Velocities are not properly serialized yet")
def test_skycoord_radial_velocity(tmpdir):
c = SkyCoord(ra=1 * u.deg, dec=2 * u.deg, radial_velocity=10 * u.km / u.s)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason="Velocities are not properly serialized yet")
def test_skycoord_proper_motion(tmpdir):
c = SkyCoord(
ra=1 * u.deg,
dec=2 * u.deg,
pm_ra_cosdec=2 * u.mas / u.yr,
pm_dec=1 * u.mas / u.yr,
)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.skip(reason="Apparent loss of precision during serialization")
def test_skycoord_extra_attribute(tmpdir):
sc = SkyCoord(10 * u.deg, 20 * u.deg, equinox="2011-01-01T00:00", frame="fk4")
tree = dict(coord=sc.transform_to("icrs"))
def check_asdf(asdffile):
assert hasattr(asdffile["coord"], "equinox")
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check_asdf)
def test_skycoord_2d_obstime(tmpdir):
sc = SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,m",
frame="fk4",
obstime=["J1990.5", "J1991.5"],
)
tree = dict(coord=sc)
assert_roundtrip_tree(tree, tmpdir)
|
80ea2e31e919b8ffcf334ad865300708cf7930cd0ecb678e4a048da9a18b9311 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree # noqa: E402
import astropy.units as u # noqa: E402
from astropy.coordinates import Angle, Latitude, Longitude # noqa: E402
def test_angle(tmpdir):
tree = {"angle": Angle(100, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_latitude(tmpdir):
tree = {"angle": Latitude(10, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_longitude(tmpdir):
tree = {"angle": Longitude(-100, u.deg, wrap_angle=180 * u.deg)}
assert_roundtrip_tree(tree, tmpdir)
|
02e65c9455a23c977c7a7cb0494901512464508f09fa4fa9e4ba3822902ea40b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree # noqa: E402
from numpy.random import randint, random # noqa: E402
import astropy.coordinates.representation as r # noqa: E402
import astropy.units as u # noqa: E402
from astropy.coordinates import Angle # noqa: E402
@pytest.fixture(params=filter(lambda x: "Base" not in x, r.__all__))
def representation(request):
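# Build one instance per parametrized representation class, filling each of
# its attributes with a random-length array of random values, using an
# angular unit for Angle-typed attributes and a length unit otherwise.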
rep = getattr(r, request.param)
angle_unit = u.deg
other_unit = u.km
kwargs = {}
arr_len = randint(1, 100)
for aname, atype in rep.attr_classes.items():
if issubclass(atype, Angle):
value = ([random()] * arr_len) * angle_unit
else:
value = ([random()] * arr_len) * other_unit
kwargs[aname] = value
return rep(**kwargs)
def test_representations(tmpdir, representation):
tree = {"representation": representation}
assert_roundtrip_tree(tree, tmpdir)
|
f8c99b26af4585a74a5402b7f3c9bd30154c665d37a9a0efa3007274e069826f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import units as u
from astropy.coordinates import ICRS, Galactic, SpectralCoord
from astropy.tests.helper import assert_quantity_allclose
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree # noqa: E402
def test_scalar_spectralcoord(tmpdir):
sc = SpectralCoord(565 * u.nm)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile["spectralcoord"], SpectralCoord)
assert_quantity_allclose(asdffile["spectralcoord"].quantity, 565 * u.nm)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_vector_spectralcoord(tmpdir):
sc = SpectralCoord([100, 200, 300] * u.GHz)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile["spectralcoord"], SpectralCoord)
assert_quantity_allclose(
asdffile["spectralcoord"].quantity, [100, 200, 300] * u.GHz
)
assert_roundtrip_tree(
tree, tmpdir, asdf_check_func=check, tree_match_func=assert_quantity_allclose
)
@pytest.mark.filterwarnings("ignore:No velocity")
def test_spectralcoord_with_obstarget(tmpdir):
sc = SpectralCoord(
10 * u.GHz,
observer=ICRS(1 * u.km, 2 * u.km, 3 * u.km, representation_type="cartesian"),
target=Galactic(10 * u.deg, 20 * u.deg, distance=30 * u.pc),
)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile["spectralcoord"], SpectralCoord)
assert_quantity_allclose(asdffile["spectralcoord"].quantity, 10 * u.GHz)
assert isinstance(asdffile["spectralcoord"].observer, ICRS)
assert isinstance(asdffile["spectralcoord"].target, Galactic)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
|
ae46907f6ffc08bf4dba4b8560a3636b9ac466d57162ed12fc2af9a4b1becb02 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests import helpers # noqa: E402
from astropy import units as u # noqa: E402
# TODO: Implement defunit
def test_unit():
yaml = """
unit: !unit/unit-1.0.0 "2.1798721 10-18kg m2 s-2"
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert ff.tree["unit"].is_equivalent(u.Ry)
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert ff.tree["unit"].is_equivalent(u.Ry)
|
05d59328f3829bd25b3e9ad410a5e3eed6a4848d29ef4fdd52135df2fc1b086e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import io # noqa: E402
from asdf.tests import helpers # noqa: E402
from astropy import units # noqa: E402
def roundtrip_quantity(yaml, quantity):
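# Parse the YAML snippet into an ASDF file and check the deserialized
# quantity, then write it back out and re-read it to confirm the value
# survives a full round trip.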
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert (ff.tree["quantity"] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert (ff.tree["quantity"] == quantity).all()
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = f"""
quantity: !unit/quantity-1.1.0
value: {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x * 2.3081 for x in range(10)]
testunit = units.ampere
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
testval = [[1, 2, 3], [4, 5, 6]]
testunit = units.km
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{testval}
unit: {testunit}
"""
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
|
d5b0facf28f09e7e318ced0a6a0c4bd34646abafd40190b97b1cc98419623989 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import units as u
from astropy.cosmology import Planck15
from astropy.cosmology.units import with_H0
from astropy.units import equivalencies as eq
asdf = pytest.importorskip("asdf", minversion="2.3.0.dev0")
from asdf.tests import helpers # noqa: E402
def get_equivalencies():
"""
Return a list of example equivalencies for testing serialization.
"""
return [
eq.plate_scale(0.3 * u.deg / u.mm),
eq.pixel_scale(0.5 * u.deg / u.pix),
eq.pixel_scale(100.0 * u.pix / u.cm),
eq.spectral_density(350 * u.nm, factor=2),
eq.spectral_density(350 * u.nm),
eq.spectral(),
eq.brightness_temperature(500 * u.GHz),
eq.brightness_temperature(500 * u.GHz, beam_area=23 * u.sr),
eq.temperature_energy(),
eq.temperature(),
eq.thermodynamic_temperature(300 * u.Hz),
eq.thermodynamic_temperature(140 * u.GHz, Planck15.Tcmb0),
eq.beam_angular_area(3 * u.sr),
eq.mass_energy(),
eq.molar_mass_amu(),
eq.doppler_relativistic(2 * u.m),
eq.doppler_optical(2 * u.nm),
eq.doppler_radio(2 * u.Hz),
eq.parallax(),
eq.logarithmic(),
eq.dimensionless_angles(),
eq.spectral() + eq.temperature(),
(
eq.spectral_density(35 * u.nm)
+ eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr)
),
(
eq.spectral()
+ eq.spectral_density(35 * u.nm)
+ eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr)
),
with_H0(),
]
@pytest.mark.parametrize("equiv", get_equivalencies())
@pytest.mark.filterwarnings(
"ignore:`with_H0` is deprecated from `astropy.units.equivalencies` "
"since astropy 5.0 and may be removed in a future version. "
"Use `astropy.cosmology.units.with_H0` instead."
)
def test_equivalencies(tmpdir, equiv):
tree = {"equiv": equiv}
helpers.assert_roundtrip_tree(tree, tmpdir)
|
55bd7f7a861891097a14209caab5a902661b7ed745872df8fcee87fd1bf8f672 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory.
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# See sphinx_astropy.conf for which values are set there.
import configparser
import doctest
import os
import sys
from datetime import datetime
from importlib import metadata
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
# -- Check for missing dependencies -------------------------------------------
missing_requirements = {}
for line in metadata.requires("astropy"):
if 'extra == "docs"' in line:
req = Requirement(line.split(";")[0])
req_package = req.name.lower()
req_specifier = str(req.specifier)
try:
version = metadata.version(req_package)
except metadata.PackageNotFoundError:
missing_requirements[req_package] = req_specifier
# The package is missing entirely, so skip the version check below.
continue
if version not in SpecifierSet(req_specifier, prereleases=True):
missing_requirements[req_package] = req_specifier
if missing_requirements:
print(
"The following packages could not be found and are required to "
"build the documentation:"
)
for key, val in missing_requirements.items():
print(f" * {key} {val}")
print('Please install the "docs" requirements.')
sys.exit(1)
from sphinx_astropy.conf.v1 import * # noqa: E402
from sphinx_astropy.conf.v1 import ( # noqa: E402
numpydoc_xref_aliases,
numpydoc_xref_astropy_aliases,
numpydoc_xref_ignore,
rst_epilog,
)
# -- Plot configuration -------------------------------------------------------
plot_rcparams = {}
plot_rcparams["figure.figsize"] = (6, 6)
plot_rcparams["savefig.facecolor"] = "none"
plot_rcparams["savefig.bbox"] = "tight"
plot_rcparams["axes.labelsize"] = "large"
plot_rcparams["figure.subplot.hspace"] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ["png", "svg", "pdf"]
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.7"
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("X.Y.Z")` here.
check_sphinx_version("1.2.1") # noqa: F405
# The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for
# the benefit of other packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping["astropy"] # noqa: F405
# add any custom intersphinx for astropy
# fmt: off
intersphinx_mapping["astropy-dev"] = ("https://docs.astropy.org/en/latest/", None) # noqa: F405
intersphinx_mapping["pyerfa"] = ("https://pyerfa.readthedocs.io/en/stable/", None) # noqa: F405
intersphinx_mapping["pytest"] = ("https://docs.pytest.org/en/stable/", None) # noqa: F405
intersphinx_mapping["ipython"] = ("https://ipython.readthedocs.io/en/stable/", None) # noqa: F405
intersphinx_mapping["pandas"] = ("https://pandas.pydata.org/pandas-docs/stable/", None) # noqa: F405
intersphinx_mapping["sphinx_automodapi"] = ("https://sphinx-automodapi.readthedocs.io/en/stable/", None) # noqa: F405
intersphinx_mapping["packagetemplate"] = ("https://docs.astropy.org/projects/package-template/en/latest/", None) # noqa: F405
intersphinx_mapping["h5py"] = ("https://docs.h5py.org/en/stable/", None) # noqa: F405
intersphinx_mapping["asdf-astropy"] = ("https://asdf-astropy.readthedocs.io/en/latest/", None) # noqa: F405
intersphinx_mapping["fsspec"] = ("https://filesystem-spec.readthedocs.io/en/latest/", None) # noqa: F405
# fmt: on
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append("_templates") # noqa: F405
exclude_patterns.append("changes") # noqa: F405
exclude_patterns.append("_pkgtemplate.rst") # noqa: F405
exclude_patterns.append( # noqa: F405
"**/*.inc.rst"
) # .inc.rst files are *include* files; don't have Sphinx process them
# Add any paths that contain templates here, relative to this directory.
if "templates_path" not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append("_templates")
extensions += ["sphinx_changelog"] # noqa: F405
# Grab minversion from setup.cfg
setup_cfg = configparser.ConfigParser()
setup_cfg.read(os.path.join(os.path.pardir, "setup.cfg"))
__minimum_python_version__ = setup_cfg["options"]["python_requires"].replace(">=", "")
project = "Astropy"
min_versions = {}
for line in metadata.requires("astropy"):
req = Requirement(line.split(";")[0])
min_versions[req.name.lower()] = str(req.specifier)
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
with open("common_links.txt") as cl:
rst_epilog += cl.read().format(
minimum_python=__minimum_python_version__, **min_versions
)
# Manually register doctest options since matplotlib 3.5 messed up allowing them
# from pytest-doctestplus
IGNORE_OUTPUT = doctest.register_optionflag("IGNORE_OUTPUT")
REMOTE_DATA = doctest.register_optionflag("REMOTE_DATA")
FLOAT_CMP = doctest.register_optionflag("FLOAT_CMP")
# Whether to create cross-references for the parameter types in the
# Parameters, Other Parameters, Returns and Yields sections of the docstring.
numpydoc_xref_param_type = True
# Words not to cross-reference. Most likely, these are common words used in
# parameter type descriptions that may be confused for classes of the same
# name. The base set comes from sphinx-astropy. We add more here.
numpydoc_xref_ignore.update(
{
"mixin",
"Any", # aka something that would be annotated with `typing.Any`
# needed in subclassing numpy # TODO! revisit
"Arguments",
"Path",
# TODO! not need to ignore.
"flag",
"bits",
}
)
# Mappings to fully qualified paths (or correct ReST references) for the
# aliases/shortcuts used when specifying the types of parameters.
# Numpy provides some defaults
# https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94
# and a base set comes from sphinx-astropy.
# so here we mostly need to define Astropy-specific x-refs
numpydoc_xref_aliases.update(
{
# python & adjacent
"Any": "`~typing.Any`",
"file-like": ":term:`python:file-like object`",
"file": ":term:`python:file object`",
"path-like": ":term:`python:path-like object`",
"module": ":term:`python:module`",
"buffer-like": ":term:buffer-like",
"hashable": ":term:`python:hashable`",
# for matplotlib
"color": ":term:`color`",
# for numpy
"ints": ":class:`python:int`",
# for astropy
"number": ":term:`number`",
"Representation": ":class:`~astropy.coordinates.BaseRepresentation`",
"writable": ":term:`writable file-like object`",
"readable": ":term:`readable file-like object`",
"BaseHDU": ":doc:`HDU </io/fits/api/hdus>`",
}
)
# Add from sphinx-astropy 1) glossary aliases 2) physical types.
numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases)
# Turn off table of contents entries for functions and classes
toc_object_entries = False
# -- Project information ------------------------------------------------------
author = "The Astropy Developers"
copyright = f"2011–{datetime.utcnow().year}, " + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The full version, including alpha/beta/rc tags.
release = metadata.version(project)
# The short X.Y version.
version = ".".join(release.split(".")[:2])
# Only include dev docs in dev version.
dev = "dev" in release
if not dev:
exclude_patterns.append("development/*") # noqa: F405
exclude_patterns.append("testhelpers.rst") # noqa: F405
# -- Options for the module index ---------------------------------------------
modindex_common_prefix = ["astropy."]
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
# html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
# html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
# html_theme = None
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f"{project} v{release}"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {"to_be_indexed": ["stable", "latest"], "is_development": dev}
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", project + ".tex", project + " Documentation", author, "manual")
]
latex_logo = "_static/astropy_logo.pdf"
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", project.lower(), project + " Documentation", [author], 1)]
# Setting this URL is required by sphinx-astropy
github_issues_url = "https://github.com/astropy/astropy/issues/"
edit_on_github_branch = "main"
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
# The actual nitpick exceptions are listed in the docs/nitpick-exceptions file and loaded into nitpick_ignore below.
nitpick_ignore = []
for line in open("nitpick-exceptions"):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, target))
# -- Options for the Sphinx gallery -------------------------------------------
try:
import warnings
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
"backreferences_dir": "generated/modules", # path to store the module using example template
"filename_pattern": "^((?!skip_).)*$", # execute all examples except those that start with "skip_"
"examples_dirs": f"..{os.sep}examples", # path to the examples scripts
"gallery_dirs": "generated/examples", # path to save gallery generated examples
"reference_url": {
"astropy": None,
"matplotlib": "https://matplotlib.org/stable/",
# The stable numpy search js isn't loadable at the moment (2022-12-07)
# It seems to be valid js but it's not valid json, so Sphinx won't load it.
"numpy": "https://numpy.org/devdocs/",
},
"abort_on_example_error": True,
}
# Filter out backend-related warnings as described in
# https://github.com/sphinx-gallery/sphinx-gallery/pull/564
warnings.filterwarnings(
"ignore",
category=UserWarning,
message=(
"Matplotlib is currently using agg, which is a"
" non-GUI backend, so cannot show the figure."
),
)
except ImportError:
sphinx_gallery = None
# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = [
"https://journals.aas.org/manuscript-preparation/",
"https://maia.usno.navy.mil/",
"https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer",
"https://aa.usno.navy.mil/publications/docs/Circular_179.php",
"http://data.astropy.org",
"https://doi.org/10.1017/S0251107X00002406", # internal server error
"https://doi.org/10.1017/pasa.2013.31", # internal server error
"https://www.tandfonline.com/", # 403 Client Error: Forbidden
"https://pyfits.readthedocs.io/en/v3.2.1/", # defunct page in CHANGES.rst
r"https://github\.com/astropy/astropy/(?:issues|pull)/\d+",
]
linkcheck_timeout = 180
linkcheck_anchors = False
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ["robots.txt"]
def rstjinja(app, docname, source):
"""Render pages as a jinja template to hide/show dev docs."""
# Make sure we're outputting HTML
if app.builder.format != "html":
return
files_to_render = ["index", "install"]
if docname in files_to_render:
print(f"Jinja rendering {docname}")
rendered = app.builder.templates.render_string(
source[0], app.config.html_context
)
source[0] = rendered
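# The rendered pages can then branch on values from ``html_context``; for
# example (a sketch, not taken from the actual index.rst):
#
#     {% if is_development %}
#     This content only appears in the development docs.
#     {% endif %}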
def resolve_astropy_and_dev_reference(app, env, node, contnode):
"""
Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases.
Documentation links in astropy can be set up as intersphinx links so that
affiliate packages do not have to override the docstrings when building
the docs.
If we are building the development docs it is a local ref targeting the
label ``astropy-dev:<label>``, but for stable docs it should be an
intersphinx resolution to the development docs.
See https://github.com/astropy/astropy/issues/11366
"""
# should the node be processed?
reftarget = node.get("reftarget") # str or None
if str(reftarget).startswith("astropy:"):
# This allows Astropy to use intersphinx links to itself and have
# them resolve to local links. Downstream packages will see intersphinx.
# TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented.
process, replace = True, "astropy:"
elif dev and str(reftarget).startswith("astropy-dev:"):
process, replace = True, "astropy-dev:"
else:
process, replace = False, ""
# make link local
if process:
reftype = node.get("reftype")
refdoc = node.get("refdoc", app.env.docname)
# convert astropy intersphinx targets to local links.
# there are a few types of intersphinx link patterns, as described in
# https://docs.readthedocs.io/en/stable/guides/intersphinx.html
reftarget = reftarget.replace(replace, "")
if reftype == "doc": # also need to replace the doc link
node.replace_attr("reftarget", reftarget)
# Delegate to the ref node's original domain/target (typically :ref:)
try:
domain = app.env.domains[node["refdomain"]]
return domain.resolve_xref(
app.env, refdoc, app.builder, reftype, reftarget, node, contnode
)
except Exception:
pass
# Otherwise return None which should delegate to intersphinx
def setup(app):
if sphinx_gallery is None:
msg = (
"The sphinx_gallery extension is not installed, so the "
"gallery will not be built. You will probably see "
"additional warnings about undefined references due "
"to this."
)
try:
app.warn(msg)
except AttributeError:
# Sphinx 1.6+
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.warning(msg)
# Generate the page from Jinja template
app.connect("source-read", rstjinja)
# Set this to higher priority than intersphinx; this way when building
# dev docs astropy-dev: targets will go to the local docs instead of the
# intersphinx mapping
app.connect("missing-reference", resolve_astropy_and_dev_reference, priority=400)
|
a9e48ba89f4e1bd927d8f60e85fcc87b5006635915ed31051f28bb4d7424bd77 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
# pylint: disable=invalid-name, protected-access, redefined-outer-name
import abc
import copy
import functools
import inspect
import itertools
import operator
import types
from collections import defaultdict, deque
from inspect import signature
from itertools import chain
import numpy as np
from astropy.nddata.utils import add_array, extract_array
from astropy.table import Table
from astropy.units import Quantity, UnitsError, dimensionless_unscaled
from astropy.units.utils import quantity_asanyarray
from astropy.utils import (
IncompatibleShapeError,
check_broadcast,
find_current_module,
indent,
isiterable,
metadata,
sharedmethod,
)
from astropy.utils.codegen import make_function_with_signature
from .bounding_box import CompoundBoundingBox, ModelBoundingBox
from .parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline
from .utils import (
_combine_equivalency_dict,
_ConstraintsDict,
_SpecialOperatorsDict,
combine_labels,
get_inputs_and_params,
make_binary_operator_eval,
)
__all__ = [
"Model",
"FittableModel",
"Fittable1DModel",
"Fittable2DModel",
"CompoundModel",
"fix_inputs",
"custom_model",
"ModelDefinitionError",
"bind_bounding_box",
"bind_compound_bounding_box",
]
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
"""
return lambda left, right: CompoundModel(oper, left, right, **kwargs)
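# For example, ``__add__`` is installed as ``_model_oper("+")`` (see
# ``opermethods`` below), so ``m1 + m2`` for two model instances builds
# ``CompoundModel("+", m1, m2)``.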
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions."""
class _ModelMeta(abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
def __new__(mcls, name, bases, members, **kwds):
# See the docstring for _is_dynamic above
if "_is_dynamic" not in members:
members["_is_dynamic"] = mcls._is_dynamic
opermethods = [
("__add__", _model_oper("+")),
("__sub__", _model_oper("-")),
("__mul__", _model_oper("*")),
("__truediv__", _model_oper("/")),
("__pow__", _model_oper("**")),
("__or__", _model_oper("|")),
("__and__", _model_oper("&")),
("_fix_inputs", _model_oper("fix_inputs")),
]
members["_parameters_"] = {
k: v for k, v in members.items() if isinstance(v, Parameter)
}
for opermethod, opercall in opermethods:
members[opermethod] = opercall
cls = super().__new__(mcls, name, bases, members, **kwds)
param_names = list(members["_parameters_"])
# Need to walk each base MRO to collect all parameter names
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
# Preserve order of definitions
param_names = list(tbase._parameters_) + param_names
# Remove duplicates (arising from redefinition in subclass).
param_names = list(dict.fromkeys(param_names))
if cls._parameters_:
if hasattr(cls, "_param_names"):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(param_names)
else:
cls.param_names = tuple(param_names)
return cls
def __init__(cls, name, bases, members, **kwds):
super().__init__(name, bases, members, **kwds)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
pdict = {}
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
for parname, val in cls._parameters_.items():
pdict[parname] = val
cls._handle_special_methods(members, pdict)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith("_abc_"):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ("__init__", "__call__"):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
internal implementation detail (i.e. begins with '_').
"""
return not (cls.__name__.startswith("_") or inspect.isabstract(cls))
def rename(cls, name=None, inputs=None, outputs=None):
"""
Creates a copy of this model class with a new name, inputs or outputs.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class 'astropy.modeling.core.SkyRotation'>
Name: SkyRotation (Rotation2D)
N_inputs: 2
N_outputs: 2
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
if name is None:
name = cls.name
if inputs is None:
inputs = cls.inputs
else:
if not isinstance(inputs, tuple):
raise TypeError("Expected 'inputs' to be a tuple of strings.")
elif len(inputs) != len(cls.inputs):
raise ValueError(f"{cls.name} expects {len(cls.inputs)} inputs")
if outputs is None:
outputs = cls.outputs
else:
if not isinstance(outputs, tuple):
raise TypeError("Expected 'outputs' to be a tuple of strings.")
elif len(outputs) != len(cls.outputs):
raise ValueError(f"{cls.name} expects {len(cls.outputs)} outputs")
new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs})
new_cls.__module__ = modname
new_cls.__qualname__ = name
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get("inverse")
if inverse is None or cls.__bases__[0] is object:
# The latter clause is to prevent the below code from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get("bounding_box")
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = ModelBoundingBox.validate(
cls, bounding_box, _preserve_ignore=True
)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of ModelBoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
f"The bounding_box method for {cls.name} is not correctly "
"defined: If defined as a method all arguments to that "
"method (besides self) must be keyword arguments with "
"default values that can be used to compute a default "
"bounding box."
)
kwargs.append((param.name, param.default))
__call__.__signature__ = sig
return type(
f"{cls.name}ModelBoundingBox", (ModelBoundingBox,), {"__call__": __call__}
)
def _handle_special_methods(cls, members, pdict):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, "__qualname__"):
wrapper.__qualname__ = f"{cls.__qualname__}.{wrapper.__name__}"
if (
"__call__" not in members
and "n_inputs" in members
and isinstance(members["n_inputs"], int)
and members["n_inputs"] > 0
):
# Don't create a custom __call__ for classes that already have one
# explicitly defined (this includes the Model base class, and any
# other classes that manually override __call__).
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
# When called, models can take two optional keyword arguments:
#
# * model_set_axis, which indicates (for multi-dimensional input)
# which axis is used to indicate different models
#
# * equivalencies, a dictionary of equivalencies to be applied to
# the input values, where each key should correspond to one of
# the inputs.
#
# The following code creates the __call__ function with these
# two keyword arguments.
args = ("self",)
kwargs = {
"model_set_axis": None,
"with_bounding_box": False,
"fill_value": np.nan,
"equivalencies": None,
"inputs_map": None,
}
new_call = make_function_with_signature(
__call__, args, kwargs, varargs="inputs", varkwargs="new_inputs"
)
# The following makes it look like __call__
# was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if (
"__init__" not in members
and not inspect.isabstract(cls)
and cls._parameters_
):
# Build list of all parameters including inherited ones
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional
# arguments
if all(p.default is not None for p in pdict.values()):
args = ("self",)
kwargs = []
for param_name, param_val in pdict.items():
default = param_val.default
unit = param_val.unit
# If the unit was specified in the parameter but the
# default is not a Quantity, attach the unit to the
# default.
if unit is not None:
default = Quantity(default, unit, copy=False, subok=True)
kwargs.append((param_name, default))
else:
args = ("self",) + tuple(pdict.keys())
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs="kwargs"
)
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper("+")
__sub__ = _model_oper("-")
__mul__ = _model_oper("*")
__truediv__ = _model_oper("/")
__pow__ = _model_oper("**")
__or__ = _model_oper("|")
__and__ = _model_oper("&")
_fix_inputs = _model_oper("fix_inputs")
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif inspect.isabstract(base) or base.__name__.startswith("_"):
break
bases.append(base.name)
if bases:
return f"{cls.name} ({' -> '.join(bases)})"
return cls.name
try:
default_keywords = [
("Name", format_inheritance(cls)),
("N_inputs", cls.n_inputs),
("N_outputs", cls.n_outputs),
]
if cls.param_names:
default_keywords.append(("Fittable parameters", cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append(f"{keyword}: {value}")
return "\n".join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
sets. Also, ``model_set_axis=False`` can be used to tell that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
`~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
List of functions of length n such that ``ineqcons[j](x0, *args) >=
0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ("eqcons", "ineqcons")
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
n_inputs = 0
"""The number of inputs."""
n_outputs = 0
""" The number of outputs."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
_has_inverse_bounding_box = False
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
# Covariance matrix can be set by fitter if available.
# If cov_matrix is available, then std will set as well
_cov_matrix = None
_stds = None
def __init_subclass__(cls, **kwargs):
super().__init_subclass__()
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
self._default_inputs_outputs()
if meta is not None:
self.meta = meta
self._name = name
# add parameters to instance level by walking MRO list
mro = self.__class__.__mro__
for cls in mro:
if issubclass(cls, Model):
for parname, val in cls._parameters_.items():
newpar = copy.deepcopy(val)
newpar.model = self
if parname not in self.__dict__:
self.__dict__[parname] = newpar
self._initialize_constraints(kwargs)
kwargs = self._initialize_setters(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_slices()
self._initialize_unit_support()
def _default_inputs_outputs(self):
if self.n_inputs == 1 and self.n_outputs == 1:
self._inputs = ("x",)
self._outputs = ("y",)
elif self.n_inputs == 2 and self.n_outputs == 1:
self._inputs = ("x", "y")
self._outputs = ("z",)
else:
try:
self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs))
self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs))
except TypeError:
# self.n_inputs and self.n_outputs are properties
# This is the case when subclasses of Model do not define
# ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``.
self._inputs = ()
self._outputs = ()
def _initialize_setters(self, kwargs):
"""
This exists to inject defaults for settable properties for models
originating from `custom_model`.
"""
if hasattr(self, "_settable_properties"):
setters = {
name: kwargs.pop(name, default)
for name, default in self._settable_properties.items()
}
for name, value in setters.items():
setattr(self, name, value)
return kwargs
@property
def inputs(self):
return self._inputs
@inputs.setter
def inputs(self, val):
if len(val) != self.n_inputs:
raise ValueError(
f"Expected {self.n_inputs} number of inputs, got {len(val)}."
)
self._inputs = val
self._initialize_unit_support()
@property
def outputs(self):
return self._outputs
@outputs.setter
def outputs(self, val):
if len(val) != self.n_outputs:
raise ValueError(
f"Expected {self.n_outputs} number of outputs, got {len(val)}."
)
self._outputs = val
@property
def n_inputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``inputs`` as class variables is removed.
if hasattr(self.__class__, "n_inputs") and isinstance(
self.__class__.n_inputs, property
):
try:
return len(self.__class__.inputs)
except TypeError:
try:
return len(self.inputs)
except AttributeError:
return 0
return self.__class__.n_inputs
@property
def n_outputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``outputs`` as class variables is removed.
if hasattr(self.__class__, "n_outputs") and isinstance(
self.__class__.n_outputs, property
):
try:
return len(self.__class__.outputs)
except TypeError:
try:
return len(self.outputs)
except AttributeError:
return 0
return self.__class__.n_outputs
def _calculate_separability_matrix(self):
"""
This is a hook which customises the behavior of modeling.separable.
This allows complex subclasses to customise the separability matrix.
If it returns `NotImplemented` the default behavior is used.
"""
return NotImplemented
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {
key: self._input_units_strict for key in self.inputs
}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {
key: self._input_units_allow_dimensionless for key in self.inputs
}
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that input.
"""
val = self._input_units_strict
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
val = self._input_units_allow_dimensionless
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def uses_quantity(self):
"""
True if this model has been created with `~astropy.units.Quantity`
objects or if there are no parameters.
This can be used to determine if this model should be evaluated with
`~astropy.units.Quantity` or regular floats.
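Examples
--------
A minimal illustration (using `~astropy.modeling.functional_models.Gaussian1D`
with arbitrary parameter values):
>>> from astropy.modeling.models import Gaussian1D
>>> from astropy import units as u
>>> Gaussian1D(amplitude=1, mean=0, stddev=1).uses_quantity
False
>>> Gaussian1D(amplitude=1 * u.Jy, mean=0 * u.m, stddev=1 * u.m).uses_quantity
True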
"""
pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
return (len(pisq) == 0) or any(pisq)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
@staticmethod
def _strip_ones(intup):
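# Drop all length-1 axes, e.g. (1, 3, 1, 2) -> (3, 2); used in __setattr__
# below to compare parameter shapes while ignoring broadcastable axes.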
return tuple(item for item in intup if item != 1)
def __setattr__(self, attr, value):
if isinstance(self, CompoundModel):
param_names = self._param_names
param_names = self.param_names
if param_names is not None and attr in self.param_names:
param = self.__dict__[attr]
value = _tofloat(value)
if param._validator is not None:
param._validator(self, value)
# check consistency with previous shape and size
eshape = self._param_metrics[attr]["shape"]
if eshape == ():
eshape = (1,)
vshape = np.array(value).shape
if vshape == ():
vshape = (1,)
esize = self._param_metrics[attr]["size"]
if np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones(
eshape
):
raise InputParameterError(
f"Value for parameter {attr} does not match shape or size\nexpected"
f" by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})"
)
if param.unit is None:
if isinstance(value, Quantity):
param._unit = value.unit
param.value = value.value
else:
param.value = value
else:
if not isinstance(value, Quantity):
raise UnitsError(
f"The '{param.name}' parameter should be given as a"
" Quantity because it was originally "
"initialized as a Quantity"
)
param._unit = value.unit
param.value = value.value
else:
if attr in ["fittable", "linear"]:
self.__dict__[attr] = value
else:
super().__setattr__(attr, value)
def _pre_evaluate(self, *args, **kwargs):
"""
Model specific input setup that needs to occur prior to model evaluation
"""
# Broadcast inputs into common size
inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs)
# Setup actual model evaluation method
parameters = self._param_sets(raw=True, units=True)
def evaluate(_inputs):
return self.evaluate(*chain(_inputs, parameters))
return evaluate, inputs, broadcasted_shapes, kwargs
def get_bounding_box(self, with_bbox=True):
"""
Return the ``bounding_box`` of a model if it exists or ``None``
otherwise.
Parameters
----------
with_bbox :
The value of the ``with_bounding_box`` keyword argument
when calling the model. Default is `True` for usage when
looking up the model's ``bounding_box`` without risk of error.
"""
bbox = None
if not isinstance(with_bbox, bool) or with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
pass
if isinstance(bbox, CompoundBoundingBox) and not isinstance(
with_bbox, bool
):
bbox = bbox[with_bbox]
return bbox
@property
def _argnames(self):
"""The inputs used to determine input_shape for bounding_box evaluation"""
return self.inputs
def _validate_input_shape(
self, _input, idx, argnames, model_set_axis, check_model_set_axis
):
"""
Perform basic validation of a single model input's shape
-- that it has the minimum dimensions for the given model_set_axis.
Returns the shape of the input if validation succeeds.
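For example, with ``model_set_axis=0`` and ``n_models=2``, an input of
shape ``(2, 10)`` passes while an input of shape ``(10,)`` raises
`ValueError`; scalar inputs are exempt from this check.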
"""
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
f"For model_set_axis={model_set_axis}, all inputs must be at "
f"least {model_set_axis + 1}-dimensional."
)
if input_shape[model_set_axis] != self._n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
f"Input argument '{argname}' does not have the correct dimensions"
f" in model_set_axis={model_set_axis} for a model set with"
f" n_models={self._n_models}."
)
return input_shape
def _validate_input_shapes(self, inputs, argnames, model_set_axis):
"""
Perform basic validation of model inputs
--that they are mutually broadcastable and that they have
the minimum dimensions for the given model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = self._n_models > 1 and model_set_axis is not False
all_shapes = []
for idx, _input in enumerate(inputs):
all_shapes.append(
self._validate_input_shape(
_input, idx, argnames, model_set_axis, check_model_set_axis
)
)
input_shape = check_broadcast(*all_shapes)
if input_shape is None:
raise ValueError(
"All inputs must have identical shapes or must be scalars."
)
return input_shape
def input_shape(self, inputs):
"""Get input shape for bounding_box evaluation"""
return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis)
def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox):
"""
Generic model evaluation routine
Selects and evaluates model with or without bounding_box enforcement
"""
# Evaluate the model using the prepared evaluation method either
# enforcing the bounding_box or not.
bbox = self.get_bounding_box(with_bbox)
if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None:
outputs = bbox.evaluate(evaluate, _inputs, fill_value)
else:
outputs = evaluate(_inputs)
return outputs
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
Model specific post evaluation processing of outputs
"""
if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
return outputs
@property
def bbox_with_units(self):
return not isinstance(self, CompoundModel)
def __call__(self, *args, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
# Turn any keyword arguments into positional arguments.
args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs)
# Read model evaluation related parameters
with_bbox = kwargs.pop("with_bounding_box", False)
fill_value = kwargs.pop("fill_value", np.nan)
# prepare for model evaluation (overridden in CompoundModel)
evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate(
*args, **kwargs
)
outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox)
# post-process evaluation results (overridden in CompoundModel)
return self._post_evaluate(
inputs, outputs, broadcasted_shapes, with_bbox, **kwargs
)
def _get_renamed_inputs_as_positional(self, *args, **kwargs):
def _keyword2positional(kwargs):
# Inputs were passed as keyword (not positional) arguments.
# Because the signature of the ``__call__`` is defined at
# the class level, the name of the inputs cannot be changed at
# the instance level and the old names are always present in the
# signature of the method. In order to use the new names of the
# inputs, the old names are taken out of ``kwargs``, the input
# values are sorted in the order of self.inputs and passed as
# positional arguments to ``__call__``.
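# As an illustration, if a model's inputs were renamed to ('lam',), a call
# like model(lam=values) arrives here with kwargs={'lam': values} and is
# converted into the positional call model(values).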
# These are the keys that are always present as keyword arguments.
keys = [
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
]
new_inputs = {}
# kwargs contain the names of the new inputs + ``keys``
allkeys = list(kwargs.keys())
# Remove the names of the new inputs from kwargs and save them
# to a dict ``new_inputs``.
for key in allkeys:
if key not in keys:
new_inputs[key] = kwargs[key]
del kwargs[key]
return new_inputs, kwargs
n_args = len(args)
new_inputs, kwargs = _keyword2positional(kwargs)
n_all_args = n_args + len(new_inputs)
if n_all_args < self.n_inputs:
raise ValueError(
f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}"
)
elif n_all_args > self.n_inputs:
raise ValueError(
f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}"
)
if n_args == 0:
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
new_args.append(new_inputs[k])
elif n_args != self.n_inputs:
# Some inputs are passed as positional, others as keyword arguments.
args = list(args)
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
if k in new_inputs:
new_args.append(new_inputs[k])
else:
new_args.append(args[0])
del args[0]
else:
new_args = args
return new_args, kwargs
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def model_set_axis(self):
"""
The index of the model set axis--that is, the axis of a parameter array
along which values for the different models in the set are laid out--as
specified when the model was initialized.
See the documentation on :ref:`astropy:modeling-model-sets`
for more details.
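For example (illustrative), a model set created with
``Gaussian1D([1, 2], [0, 0], [1, 1], n_models=2)`` stores each parameter
along axis 0, so ``model_set_axis`` is ``0``.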
"""
return self._model_set_axis
@property
def param_sets(self):
"""
Return the parameter values for all parameter sets.
This is a list with one item per parameter, each item being an array of
that parameter's values across all parameter sets, with the last axis
associated with the parameter set.
"""
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
self._parameters_to_array()
start = self._param_metrics[self.param_names[0]]["slice"].start
stop = self._param_metrics[self.param_names[-1]]["slice"].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]["slice"].start
stop = self._param_metrics[self.param_names[-1]]["slice"].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
f"parameters array: {e!r}"
)
self._array_to_parameters()
@property
def sync_constraints(self):
"""
This is a boolean property that indicates whether or not accessing the
constraints automatically checks the constituent models' current values. It
defaults to True on creation of a model, but for fitting purposes it should
be set to False for performance reasons.
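A minimal sketch of the intended fitting pattern (assuming ``model`` is an
existing model instance)::
model.sync_constraints = False
# ... run the fit ...
model.sync_constraints = True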
"""
if not hasattr(self, "_sync_constraints"):
self._sync_constraints = True
return self._sync_constraints
@sync_constraints.setter
def sync_constraints(self, value):
if not isinstance(value, bool):
raise ValueError("sync_constraints only accepts True or False as values")
self._sync_constraints = value
@property
def fixed(self):
"""
A ``dict`` mapping parameter names to their fixed constraint.
"""
if not hasattr(self, "_fixed") or self.sync_constraints:
self._fixed = _ConstraintsDict(self, "fixed")
return self._fixed
@property
def bounds(self):
"""
A ``dict`` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples or ``[min, max]`` lists.
"""
if not hasattr(self, "_bounds") or self.sync_constraints:
self._bounds = _ConstraintsDict(self, "bounds")
return self._bounds
@property
def tied(self):
"""
A ``dict`` mapping parameter names to their tied constraint.
"""
if not hasattr(self, "_tied") or self.sync_constraints:
self._tied = _ConstraintsDict(self, "tied")
return self._tied
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._mconstraints["eqcons"]
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._mconstraints["ineqcons"]
def has_inverse(self):
"""
Returns True if the model has an analytic or user
inverse defined.
"""
try:
self.inverse
except NotImplementedError:
return False
return True
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
set with a manually-defined inverse, such as a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
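Examples
--------
Assigning and clearing a user-defined inverse (an illustrative sketch
using the `~astropy.modeling.functional_models.Shift` model):
>>> from astropy.modeling.models import Shift
>>> model = Shift(2)
>>> model.inverse = Shift(-2)
>>> model.has_user_inverse
True
>>> del model.inverse
>>> model.has_user_inverse
False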
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
result = self._inverse()
if result is not NotImplemented:
if not self._has_inverse_bounding_box:
result.bounding_box = None
return result
raise NotImplementedError(
"No analytical or user-supplied inverse transform "
"has been implemented for this model."
)
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
"model to have no inverse."
)
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
try:
del self._user_inverse
except AttributeError:
pass
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
@property
def bounding_box(self):
r"""
A `tuple` of length `n_inputs` defining the bounding box limits, or
raises `NotImplementedError` if no ``bounding_box`` is defined.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
manually to an array-like object of shape ``(model.n_inputs, 2)``. For
further usage, see :ref:`astropy:bounding-boxes`.
The limits are ordered according to the `numpy` ``'C'`` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: No bounding box is defined for this model
(note: the bounding box was explicitly disabled for this model;
use `del model.bounding_box` to restore the default bounding box,
if one is defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model)."
)
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError("No bounding box is defined for this model.")
elif isinstance(self._bounding_box, ModelBoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return ModelBoundingBox.validate(self, self._bounding_box())
else:
# The only other allowed possibility is that it's a ModelBoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), model=self)()
return self._bounding_box(bounding_box, model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
elif isinstance(bounding_box, CompoundBoundingBox) or isinstance(
bounding_box, dict
):
cls = CompoundBoundingBox
elif isinstance(self._bounding_box, type) and issubclass(
self._bounding_box, ModelBoundingBox
):
cls = self._bounding_box
else:
cls = ModelBoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
def set_slice_args(self, *args):
if isinstance(self._user_bounding_box, CompoundBoundingBox):
self._user_bounding_box.slice_args = args
else:
raise RuntimeError("The bounding_box for this model is not compound")
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def cov_matrix(self):
"""
Fitter should set covariance matrix, if available.
"""
return self._cov_matrix
@cov_matrix.setter
def cov_matrix(self, cov):
self._cov_matrix = cov
unfix_untied_params = [
p
for p in self.param_names
if (self.fixed[p] is False) and (self.tied[p] is False)
]
if type(cov) == list: # model set
param_stds = []
for c in cov:
param_stds.append(
[np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)]
)
for p, param_name in enumerate(unfix_untied_params):
par = getattr(self, param_name)
par.std = [item[p] for item in param_stds]
setattr(self, param_name, par)
else:
param_stds = [
np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix)
]
for param_name in unfix_untied_params:
par = getattr(self, param_name)
par.std = param_stds.pop(0)
setattr(self, param_name, par)
@property
def stds(self):
"""
Standard deviation of parameters, if covariance matrix is available.
"""
return self._stds
@stds.setter
def stds(self, stds):
self._stds = stds
@property
def separable(self):
"""A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
raise NotImplementedError(
'The "separable" property is not defined for '
f"model {self.__class__.__name__}"
)
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
Return an instance of the model for which the parameter values have
been converted to the right units for the data, then the units have
been stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not
necessarily the units of the input data, but are derived from them.
Model subclasses that want fitting to work in the presence of
quantities need to define a ``_parameter_units_for_data_units`` method
that takes the input and output units (as two dictionaries) and
returns a dictionary giving the target units for each parameter.
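A minimal sketch of such a method for a hypothetical straight-line model
with input ``x``, output ``y`` and parameters ``slope`` and ``intercept``::
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'slope': outputs_unit['y'] / inputs_unit['x'],
'intercept': outputs_unit['y']}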
"""
model = self.copy()
inputs_unit = {
inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
for inp in self.inputs
if kwargs[inp] is not None
}
outputs_unit = {
out: getattr(kwargs[out], "unit", dimensionless_unscaled)
for out in self.outputs
if kwargs[out] is not None
}
parameter_units = self._parameter_units_for_data_units(
inputs_unit, outputs_unit
)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
if isinstance(model, CompoundModel):
model.strip_units_from_tree()
return model
def output_units(self, **kwargs):
"""
Return a dictionary of output units for this model given a dictionary
of fitting inputs and outputs.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
This method will force extra model evaluations, which may be computationally
expensive. To avoid this, one can add a return_units property to the model,
see :ref:`astropy:models_return_units`.
"""
units = self.return_units
if units is None or units == {}:
inputs = {inp: kwargs[inp] for inp in self.inputs}
values = self(**inputs)
if self.n_outputs == 1:
values = (values,)
units = {
out: getattr(values[index], "unit", dimensionless_unscaled)
for index, out in enumerate(self.outputs)
}
return units
def strip_units_from_tree(self):
for item in self._leaflist:
for parname in item.param_names:
par = getattr(item, parname)
par._set_unit(None, force=True)
def with_units_from_data(self, **kwargs):
"""
Return an instance of the model whose parameters have units compatible
with the data units specified.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units
of the input data, but are derived from them. Model subclasses that
want fitting to work in the presence of quantities need to define a
``_parameter_units_for_data_units`` method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
"""
model = self.copy()
inputs_unit = {
inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
for inp in self.inputs
if kwargs[inp] is not None
}
outputs_unit = {
out: getattr(kwargs[out], "unit", dimensionless_unscaled)
for out in self.outputs
if kwargs[out] is not None
}
parameter_units = self._parameter_units_for_data_units(
inputs_unit, outputs_unit
)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly,
# hence the call to ``_set_unit``.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
@property
def _has_units(self):
# Returns True if any of the parameters have units
for param in self.param_names:
if getattr(self, param).unit is not None:
return True
else:
return False
@property
def _supports_unit_fitting(self):
# If the model has a ``_parameter_units_for_data_units`` method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, "_parameter_units_for_data_units")
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
(i.e. a = y - bx).
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``out`` or ``coords`` must be
passed.
Raises
------
ValueError
If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or out must be input.")
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError("inconsistent shape of the output.")
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError(
"the array and model must have the same number of dimensions."
)
if bbox is not None:
# Assures position is at center pixel,
# important when using add_array.
pd = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array(
[extract_array(c, sub_shape, pos) for c in coords]
)
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input out in "
"one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
not be overridden since it will return the input units based on the
annotations.
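A minimal sketch of an annotated ``evaluate`` (hypothetical single-input
model; ``u`` is ``astropy.units``)::
@staticmethod
def evaluate(x: u.deg, amplitude, x_0):
...
``input_units`` would then return ``{'x': u.deg}`` without this property
being overridden.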
"""
if hasattr(self, "_input_units"):
return self._input_units
elif hasattr(self.evaluate, "__annotations__"):
annotations = self.evaluate.__annotations__.copy()
annotations.pop("return", None)
if annotations:
# If any input lacks an annotation this will raise a KeyError.
return {name: annotations[name] for name in self.inputs}
else:
# None means any unit is accepted
return None
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the
output of evaluate should be in, and returns a dictionary mapping
outputs to units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, "_return_units"):
return self._return_units
elif hasattr(self.evaluate, "__annotations__"):
return self.evaluate.__annotations__.get("return", None)
else:
# None means any unit is accepted
return None
def _prepare_inputs_single_model(self, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
# Ensure that array scalars are always upgraded to 1-D arrays for the
# sake of consistency with how parameters work. They will be cast back
# to scalars at the end
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if self.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
f"self input argument {self.inputs[idx]!r} of shape"
f" {input_shape!r} cannot be broadcast with parameter"
f" {param.name!r} of shape {param.shape!r}."
)
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if self.n_outputs > self.n_inputs:
extra_outputs = self.n_outputs - self.n_inputs
if not broadcasts:
# If there were no inputs then the broadcasts list is empty
# just add a None since there is no broadcasting of outputs and
# inputs necessary (see _prepare_outputs_single_model)
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
@staticmethod
def _remove_axes_from_shape(shape, axis):
"""
Given a shape tuple as the first input, construct a new one by removing
that particular axis from the shape and all preceding axes. Negative axis
numbers are permitted, where the axis is relative to the last axis.
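For example, ``_remove_axes_from_shape((2, 3, 4), 1)`` gives ``(4,)`` (the
axis and all preceding axes are dropped), while
``_remove_axes_from_shape((2, 3, 4), -1)`` gives ``(2, 3)``.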
"""
if len(shape) == 0:
return shape
if axis < 0:
axis = len(shape) + axis
return shape[:axis] + shape[axis + 1 :]
if axis >= len(shape):
axis = len(shape) - 1
shape = shape[axis + 1 :]
return shape
def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input, **kwargs):
reshaped = []
pivots = []
model_set_axis_param = self.model_set_axis # needed to reshape param
for idx, _input in enumerate(inputs):
max_param_shape = ()
if self._n_models > 1 and model_set_axis_input is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (
_input.shape[:model_set_axis_input]
+ _input.shape[model_set_axis_input + 1 :]
)
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(
input_shape,
self._remove_axes_from_shape(param.shape, model_set_axis_param),
)
except IncompatibleShapeError:
raise ValueError(
f"Model input argument {self.inputs[idx]!r} of shape"
f" {input_shape!r} "
f"cannot be broadcast with parameter {param.name!r} of shape "
f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}."
)
if len(param.shape) - 1 > len(max_param_shape):
max_param_shape = self._remove_axes_from_shape(
param.shape, model_set_axis_param
)
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis_input is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model_set_axis_param
else:
pivot = input_ndim - len(max_param_shape)
new_shape = _input.shape[:pivot] + (1,) + _input.shape[pivot:]
new_input = _input.reshape(new_shape)
else:
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = self.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (
_input.shape[: pivot + 1] + new_axes + _input.shape[pivot + 1 :]
)
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis_input, pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if self.n_inputs < self.n_outputs:
pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs))
return reshaped, (pivots,)
def prepare_inputs(
self, *inputs, model_set_axis=None, equivalencies=None, **kwargs
):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
there are more than one parameter sets. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
self._validate_input_shapes(inputs, self.inputs, model_set_axis)
inputs_map = kwargs.get("inputs_map", None)
inputs = self._validate_input_units(inputs, equivalencies, inputs_map)
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if self._n_models == 1:
return self._prepare_inputs_single_model(params, inputs, **kwargs)
else:
return self._prepare_inputs_model_set(
params, inputs, model_set_axis, **kwargs
)
def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None):
inputs = list(inputs)
name = self.name or self.__class__.__name__
# Check that the units are correct, if applicable
if self.input_units is not None:
# If a leaflist is provided that means this is in the context of
# a compound model and it is necessary to create the appropriate
# alias for the input coordinate name for the equivalencies dict
if inputs_map:
edict = {}
for mod, mapping in inputs_map:
if self is mod:
edict[mapping[0]] = equivalencies[mapping[1]]
else:
edict = equivalencies
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
self.inputs, edict, self.input_units_equivalencies
)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(
input_unit, equivalencies=input_units_equivalencies[input_name]
):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is
# because some equivalencies are non-linear, and
# we need to be sure that we evaluate the model in
# its own frame of reference. If input_units_strict
# is set, we also need to convert to the input units.
if (
len(input_units_equivalencies) > 0
or self.input_units_strict[input_name]
):
inputs[i] = inputs[i].to(
input_unit,
equivalencies=input_units_equivalencies[input_name],
)
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
"could not be converted to "
"required dimensionless "
"input"
)
else:
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
" could not be "
"converted to required input"
f" units of {input_unit} ({input_unit.physical_type})"
)
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (
not self.input_units_allow_dimensionless[input_name]
and input_unit is not dimensionless_unscaled
and input_unit is not None
):
if np.any(inputs[i] != 0):
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}',"
" (dimensionless), could not be converted to required "
f"input units of {input_unit} "
f"({input_unit.physical_type})"
)
return inputs
def _process_output_units(self, inputs, outputs):
inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple(
Quantity(out, return_units.get(out_name, None), subok=True)
for out, out_name in zip(outputs, self.outputs)
)
return outputs
@staticmethod
def _prepare_output_single_model(output, broadcast_shape):
if broadcast_shape is not None:
if not broadcast_shape:
return output.item()
else:
try:
return output.reshape(broadcast_shape)
except ValueError:
try:
return output.item()
except ValueError:
return output
return output
def _prepare_outputs_single_model(self, outputs, broadcasted_shapes):
outputs = list(outputs)
for idx, output in enumerate(outputs):
try:
broadcast_shape = check_broadcast(*broadcasted_shapes[0])
except (IndexError, TypeError):
broadcast_shape = broadcasted_shapes[0][idx]
outputs[idx] = self._prepare_output_single_model(output, broadcast_shape)
return tuple(outputs)
def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis):
pivots = broadcasted_shapes[0]
# If model_set_axis = False was passed then use
# self._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = self.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot, model_set_axis)
return tuple(outputs)
def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs):
model_set_axis = kwargs.get("model_set_axis", None)
if len(self) == 1:
return self._prepare_outputs_single_model(outputs, broadcasted_shapes)
else:
return self._prepare_outputs_model_set(
outputs, broadcasted_shapes, model_set_axis
)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return self.copy()
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
"""
new_model = self.copy()
new_model._name = name
return new_model
def coerce_units(
self,
input_units=None,
return_units=None,
input_units_equivalencies=None,
input_units_allow_dimensionless=False,
):
"""
Attach units to this (unitless) model.
Parameters
----------
input_units : dict or tuple, optional
Input units to attach. If dict, each key is the name of a model input,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.inputs`.
return_units : dict or tuple, optional
Output units to attach. If dict, each key is the name of a model output,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.outputs`.
input_units_equivalencies : dict, optional
Default equivalencies to apply to input values. If set, this should be a
dictionary where each key is a string that corresponds to one of the
model inputs.
input_units_allow_dimensionless : bool or dict, optional
Allow dimensionless input. If this is True, input values to evaluate will
gain the units specified in input_units. If this is a dictionary then it
should map input name to a bool to allow dimensionless numbers for that
input.
Returns
-------
`CompoundModel`
A `CompoundModel` composed of the current model plus
`~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units.
Raises
------
ValueError
If the current model already has units.
Examples
--------
Wrapping a unitless model to require and convert units:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP
<Quantity 1.2 s>
Wrapping a unitless model but still permitting unitless input:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True)
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(10) # doctest: +FLOAT_CMP
<Quantity 21. s>
"""
from .mappings import UnitsMapping
result = self
if input_units is not None:
if self.input_units is not None:
model_units = self.input_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.inputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError(
"Cannot specify input_units for model with existing input units"
)
if isinstance(input_units, dict):
if input_units.keys() != set(self.inputs):
message = (
f"""input_units keys ({", ".join(input_units.keys())}) """
f"""do not match model inputs ({", ".join(self.inputs)})"""
)
raise ValueError(message)
input_units = [input_units[i] for i in self.inputs]
if len(input_units) != self.n_inputs:
message = (
"input_units length does not match n_inputs: "
f"expected {self.n_inputs}, received {len(input_units)}"
)
raise ValueError(message)
mapping = tuple(
(unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units)
)
input_mapping = UnitsMapping(
mapping,
input_units_equivalencies=input_units_equivalencies,
input_units_allow_dimensionless=input_units_allow_dimensionless,
)
input_mapping.inputs = self.inputs
input_mapping.outputs = self.inputs
result = input_mapping | result
if return_units is not None:
if self.return_units is not None:
model_units = self.return_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.outputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError(
"Cannot specify return_units for model "
"with existing output units"
)
if isinstance(return_units, dict):
if return_units.keys() != set(self.outputs):
message = (
f"""return_units keys ({", ".join(return_units.keys())}) """
f"""do not match model outputs ({", ".join(self.outputs)})"""
)
raise ValueError(message)
return_units = [return_units[i] for i in self.outputs]
if len(return_units) != self.n_outputs:
message = (
"return_units length does not match n_outputs: "
f"expected {self.n_outputs}, received {len(return_units)}"
)
raise ValueError(message)
mapping = tuple(
(model_units.get(i), unit)
for i, unit in zip(self.outputs, return_units)
)
return_mapping = UnitsMapping(mapping)
return_mapping.inputs = self.outputs
return_mapping.outputs = self.outputs
result = result | return_mapping
return result
@property
def n_submodels(self):
"""
Return the number of components in a single model, which is
obviously 1.
"""
return 1
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
for ckey, cvalue in values.items():
param = getattr(self, ckey)
setattr(param, constraint, cvalue)
self._mconstraints = {}
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._mconstraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
n_models = kwargs.pop("n_models", None)
if not (
n_models is None
or (isinstance(n_models, (int, np.integer)) and n_models >= 1)
):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
f"(got {n_models!r})"
)
model_set_axis = kwargs.pop("model_set_axis", None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (
model_set_axis is False
or np.issubdtype(type(model_set_axis), np.integer)
):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
f"model in a set of models (got {model_set_axis!r})."
)
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = set()
if len(args) > len(self.param_names):
raise TypeError(
f"{self.__class__.__name__}.__init__() takes at most "
f"{len(self.param_names)} positional arguments ({len(args)} given)"
)
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
# A value of None implies using the default value, if one exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
param_name = self.param_names[idx]
params.add(param_name)
if not isinstance(arg, Parameter):
value = quantity_asanyarray(arg, dtype=float)
else:
value = arg
self._initialize_parameter_value(param_name, value)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
f"{self.__class__.__name__}.__init__() got multiple values for"
f" parameter {param_name!r}"
)
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray
# because if any of the arguments are quantities, we need
# to return a Quantity object not a plain Numpy array.
value = quantity_asanyarray(value, dtype=float)
params.add(param_name)
self._initialize_parameter_value(param_name, value)
# Now deal with case where param_name is not supplied by args or kwargs
for param_name in self.param_names:
if param_name not in params:
self._initialize_parameter_value(param_name, None)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
f"{self.__class__.__name__}.__init__() got an unrecognized"
f" parameter {kwarg!r}"
)
# Determine the number of model sets: If the model_set_axis is
# None then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
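# For example, parameters given as shape-(3,) arrays with model_set_axis=0
# imply n_models=3.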
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name in self.param_names:
value = getattr(self, name)
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension at least"
f" {min_ndim} for model_set_axis={model_set_axis} (the value"
f" given for {name!r} is only {param_ndim}-dimensional)"
)
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
f"Inconsistent dimensions for parameter {name!r} for"
f" {n_models} model sets. The length of axis"
f" {model_set_axis} must be the same for all input parameter"
" values"
)
self._check_param_broadcast(max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(None)
self._n_models = n_models
# now validate parameters
for name in params:
param = getattr(self, name)
if param._validator is not None:
param._validator(self, param.value)
def _initialize_parameter_value(self, param_name, value):
"""Mostly deals with consistency checks and determining unit issues."""
if isinstance(value, Parameter):
self.__dict__[param_name] = value
return
param = getattr(self, param_name)
# Use default if value is not provided
if value is None:
default = param.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(
f"{self.__class__.__name__}.__init__() requires a value for "
f"parameter {param_name!r}"
)
value = default
unit = param.unit
else:
if isinstance(value, Quantity):
unit = value.unit
value = value.value
else:
unit = None
if unit is None and param.unit is not None:
raise InputParameterError(
f"{self.__class__.__name__}.__init__() requires a Quantity for"
f" parameter {param_name!r}"
)
param._unit = unit
param._set_unit(unit, force=True)
param.internal_unit = None
if param._setter is not None:
if unit is not None:
_val = param._setter(value * unit)
else:
_val = param._setter(value)
if isinstance(_val, Quantity):
param.internal_unit = _val.unit
param._internal_value = np.array(_val.value)
else:
param.internal_unit = None
param._internal_value = np.array(_val)
else:
param._value = np.array(value)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
def _parameters_to_array(self):
# Now set the parameter values (this will also fill
# self._parameters)
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = param.value
if not isinstance(value, np.ndarray):
value = np.array([value])
self._parameters[param_metrics[name]["slice"]] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
def _array_to_parameters(self):
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = self._parameters[param_metrics[name]["slice"]]
value.shape = param_metrics[name]["shape"]
param.value = value
def _check_param_broadcast(self, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
If ``max_ndim`` is None this merely checks that the parameter values
broadcast against one another. This mode is only used for
single model sets.
"""
all_shapes = []
model_set_axis = self._model_set_axis
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
# array with shape (2,) is extended to (2, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (
param_shape[: model_set_axis + 1]
+ new_axes
+ param_shape[model_set_axis + 1 :]
)
self._param_metrics[name]["broadcast_shape"] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = self.param_names[shape_a_idx]
param_b = self.param_names[shape_b_idx]
raise InputParameterError(
f"Parameter {param_a!r} of shape {shape_a!r} cannot be broadcast with "
f"parameter {param_b!r} of shape {shape_b!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules."
)
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
whether or not to return the raw parameter values (i.e. the values that
are actually stored in the ._parameters array, as opposed to the values
displayed to users). In most cases these are one and the same but there
are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw and param._setter:
value = param._internal_value
else:
value = param.value
broadcast_shape = self._param_metrics[name].get("broadcast_shape")
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and param.internal_unit is not None:
unit = param.internal_unit
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit, subok=True)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
parts = [repr(a) for a in args]
parts.extend(
f"{name}={param_repr_oneline(getattr(self, name))}"
for name in self.param_names
)
if self.name is not None:
parts.append(f"name={self.name!r}")
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] == value:
continue
parts.append(f"{kwarg}={value!r}")
if len(self) > 1:
parts.append(f"n_models={len(self)}")
return f"<{self.__class__.__name__}({', '.join(parts)})>"
def _format_str(self, keywords=[], defaults={}):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
("Model", self.__class__.__name__),
("Name", self.name),
("Inputs", self.inputs),
("Outputs", self.outputs),
("Model set size", len(self)),
]
parts = [
f"{keyword}: {value}"
for keyword, value in default_keywords
if value is not None
]
for keyword, value in keywords:
if keyword.lower() in defaults and defaults[keyword.lower()] == value:
continue
parts.append(f"{keyword}: {value}")
parts.append("Parameters:")
if len(self) == 1:
columns = [[getattr(self, name).value] for name in self.param_names]
else:
columns = [getattr(self, name).value for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return "\n".join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 1
n_outputs = 1
_separable = True
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 2
n_outputs = 1
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params), f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (
lambda inputs, params: (
f[0](inputs[: f[1]], params) + g[0](inputs[f[1] :], params)
),
f[1] + g[1],
f[2] + g[2],
)
BINARY_OPERATORS = {
"+": _make_arithmetic_operator(operator.add),
"-": _make_arithmetic_operator(operator.sub),
"*": _make_arithmetic_operator(operator.mul),
"/": _make_arithmetic_operator(operator.truediv),
"**": _make_arithmetic_operator(operator.pow),
"|": _composition_operator,
"&": _join_operator,
}
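# Illustrative sketch (hypothetical operands, not part of the public API): each entry
# combines two (evaluate, n_inputs, n_outputs) tuples into a new tuple of the same
# form. For example, joining two 1-input/1-output operands with "&" yields a
# 2-input/2-output tuple:
#
#     f = (lambda inputs, params: inputs, 1, 1)
#     g = (lambda inputs, params: inputs, 1, 1)
#     BINARY_OPERATORS["&"](f, g)   # -> (<joined evaluate>, 2, 2)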
SPECIAL_OPERATORS = _SpecialOperatorsDict()
def _add_special_operator(sop_name, sop):
return SPECIAL_OPERATORS.add(sop_name, sop)
class CompoundModel(Model):
"""
Base class for compound models.
While it can be used directly, the recommended way
to combine models is through the model operators.
"""
def __init__(self, op, left, right, name=None):
self.__dict__["_param_names"] = None
self._n_submodels = None
self.op = op
self.left = left
self.right = right
self._bounding_box = None
self._user_bounding_box = None
self._leaflist = None
self._tdict = None
self._parameters = None
self._parameters_ = None
self._param_metrics = None
if op != "fix_inputs" and len(left) != len(right):
raise ValueError("Both operands must have equal values for n_models")
self._n_models = len(left)
if op != "fix_inputs" and (
(left.model_set_axis != right.model_set_axis) or left.model_set_axis
): # not False and not 0
raise ValueError(
"model_set_axis must be False or 0 and consistent for operands"
)
self._model_set_axis = left.model_set_axis
if op in ["+", "-", "*", "/", "**"] or op in SPECIAL_OPERATORS:
if left.n_inputs != right.n_inputs or left.n_outputs != right.n_outputs:
raise ModelDefinitionError(
"Both operands must match numbers of inputs and outputs"
)
self.n_inputs = left.n_inputs
self.n_outputs = left.n_outputs
self.inputs = left.inputs
self.outputs = left.outputs
elif op == "&":
self.n_inputs = left.n_inputs + right.n_inputs
self.n_outputs = left.n_outputs + right.n_outputs
self.inputs = combine_labels(left.inputs, right.inputs)
self.outputs = combine_labels(left.outputs, right.outputs)
elif op == "|":
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
"Unsupported operands for |:"
f" {left.name} (n_inputs={left.n_inputs},"
f" n_outputs={left.n_outputs}) and"
f" {right.name} (n_inputs={right.n_inputs},"
f" n_outputs={right.n_outputs}); n_outputs for the left-hand model"
" must match n_inputs for the right-hand model."
)
self.n_inputs = left.n_inputs
self.n_outputs = right.n_outputs
self.inputs = left.inputs
self.outputs = right.outputs
elif op == "fix_inputs":
if not isinstance(left, Model):
raise ValueError(
'First argument to "fix_inputs" must be an instance of '
"an astropy Model."
)
if not isinstance(right, dict):
raise ValueError(
'Expected a dictionary for second argument of "fix_inputs".'
)
# Dict keys must match either possible indices
# for model on left side, or names for inputs.
self.n_inputs = left.n_inputs - len(right)
# Assign directly to the private attribute (instead of using the setter)
# to avoid asserting the new number of outputs matches the old one.
self._outputs = left.outputs
self.n_outputs = left.n_outputs
newinputs = list(left.inputs)
keys = right.keys()
input_ind = []
for key in keys:
if np.issubdtype(type(key), np.integer):
if key >= left.n_inputs or key < 0:
raise ValueError(
"Substitution key integer value "
"not among possible input choices."
)
if key in input_ind:
raise ValueError(
"Duplicate specification of same input (index/name)."
)
input_ind.append(key)
elif isinstance(key, str):
if key not in left.inputs:
raise ValueError(
"Substitution key string not among possible input choices."
)
# Check to see it doesn't match positional
# specification.
ind = left.inputs.index(key)
if ind in input_ind:
raise ValueError(
"Duplicate specification of same input (index/name)."
)
input_ind.append(ind)
# Remove substituted inputs
input_ind.sort()
input_ind.reverse()
for ind in input_ind:
del newinputs[ind]
self.inputs = tuple(newinputs)
# Now check to see if the input model has bounding_box defined.
# If so, remove the appropriate dimensions and set it for this
# instance.
try:
self.bounding_box = self.left.bounding_box.fix_inputs(self, right)
except NotImplementedError:
pass
else:
raise ModelDefinitionError("Illegal operator: ", self.op)
self.name = name
self._fittable = None
self.fit_deriv = None
self.col_fit_deriv = None
if op in ("|", "+", "-"):
self.linear = left.linear and right.linear
else:
self.linear = False
self.eqcons = []
self.ineqcons = []
self.n_left_params = len(self.left.parameters)
self._map_parameters()
def _get_left_inputs_from_args(self, args):
return args[: self.left.n_inputs]
def _get_right_inputs_from_args(self, args):
op = self.op
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs : self.left.n_inputs + self.right.n_inputs]
elif op == "|" or op == "fix_inputs":
return None
else:
return args[: self.left.n_inputs]
def _get_left_params_from_args(self, args):
op = self.op
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
n_inputs = self.left.n_inputs + self.right.n_inputs
return args[n_inputs : n_inputs + self.n_left_params]
else:
return args[self.left.n_inputs : self.left.n_inputs + self.n_left_params]
def _get_right_params_from_args(self, args):
op = self.op
if op == "fix_inputs":
return None
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params :]
else:
return args[self.left.n_inputs + self.n_left_params :]
def _get_kwarg_model_parameters_as_positional(self, args, kwargs):
# could do it with inserts but rebuilding seems like the simplest way
# TODO: Check if any param names are in kwargs maybe as an intersection of sets?
if self.op == "&":
new_args = list(args[: self.left.n_inputs + self.right.n_inputs])
args_pos = self.left.n_inputs + self.right.n_inputs
else:
new_args = list(args[: self.left.n_inputs])
args_pos = self.left.n_inputs
for param_name in self.param_names:
kw_value = kwargs.pop(param_name, None)
if kw_value is not None:
value = kw_value
else:
try:
value = args[args_pos]
except IndexError:
raise IndexError("Missing parameter or input")
args_pos += 1
new_args.append(value)
return new_args, kwargs
def _apply_operators_to_value_lists(self, leftval, rightval, **kw):
op = self.op
if op == "+":
return binary_operation(operator.add, leftval, rightval)
elif op == "-":
return binary_operation(operator.sub, leftval, rightval)
elif op == "*":
return binary_operation(operator.mul, leftval, rightval)
elif op == "/":
return binary_operation(operator.truediv, leftval, rightval)
elif op == "**":
return binary_operation(operator.pow, leftval, rightval)
elif op == "&":
if not isinstance(leftval, tuple):
leftval = (leftval,)
if not isinstance(rightval, tuple):
rightval = (rightval,)
return leftval + rightval
elif op in SPECIAL_OPERATORS:
return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval)
else:
raise ModelDefinitionError("Unrecognized operator {op}")
def evaluate(self, *args, **kw):
op = self.op
args, kw = self._get_kwarg_model_parameters_as_positional(args, kw)
left_inputs = self._get_left_inputs_from_args(args)
left_params = self._get_left_params_from_args(args)
if op == "fix_inputs":
pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs)))
fixed_inputs = {
key if np.issubdtype(type(key), np.integer) else pos_index[key]: value
for key, value in self.right.items()
}
left_inputs = [
fixed_inputs[ind] if ind in fixed_inputs.keys() else inp
for ind, inp in enumerate(left_inputs)
]
leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params))
if op == "fix_inputs":
return leftval
right_inputs = self._get_right_inputs_from_args(args)
right_params = self._get_right_params_from_args(args)
if op == "|":
if isinstance(leftval, tuple):
return self.right.evaluate(*itertools.chain(leftval, right_params))
else:
return self.right.evaluate(leftval, *right_params)
else:
rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params))
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
@property
def n_submodels(self):
if self._leaflist is None:
self._make_leaflist()
return len(self._leaflist)
@property
def submodel_names(self):
"""Return the names of submodels in a ``CompoundModel``."""
if self._leaflist is None:
self._make_leaflist()
names = [item.name for item in self._leaflist]
nonecount = 0
newnames = []
for item in names:
if item is None:
newnames.append(f"None_{nonecount}")
nonecount += 1
else:
newnames.append(item)
return tuple(newnames)
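# Illustrative sketch: for a compound model built from three unnamed submodels the
# property above yields ('None_0', 'None_1', 'None_2'); named submodels keep their
# own names.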
def both_inverses_exist(self):
"""
Return True if both members of this compound model have inverses.
"""
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"CompoundModel.both_inverses_exist is deprecated. Use has_inverse instead.",
AstropyDeprecationWarning,
)
try:
self.left.inverse
self.right.inverse
except NotImplementedError:
return False
return True
def _pre_evaluate(self, *args, **kwargs):
"""
CompoundModel specific input setup that needs to occur prior to
model evaluation.
Note
----
All of the _pre_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
# If equivalencies are provided, necessary to map parameters and pass
# the leaflist as a keyword input for use by model evaluation so that
# the compound model input names can be matched to the model input
# names.
if "equivalencies" in kwargs:
# Restructure to be useful for the individual model lookup
kwargs["inputs_map"] = [
(value[0], (value[1], key)) for key, value in self.inputs_map().items()
]
# Setup actual model evaluation method
def evaluate(_inputs):
return self._evaluate(*_inputs, **kwargs)
return evaluate, args, None, kwargs
@property
def _argnames(self):
"""
No inputs should be used to determine input_shape when handling compound models
"""
return ()
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
CompoundModel specific post evaluation processing of outputs
Note
----
All of the _post_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1:
return outputs[0]
return outputs
def _evaluate(self, *args, **kw):
op = self.op
if op != "fix_inputs":
if op != "&":
leftval = self.left(*args, **kw)
if op != "|":
rightval = self.right(*args, **kw)
else:
rightval = None
else:
leftval = self.left(*(args[: self.left.n_inputs]), **kw)
rightval = self.right(*(args[self.left.n_inputs :]), **kw)
if op != "|":
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
elif op == "|":
if isinstance(leftval, tuple):
return self.right(*leftval, **kw)
else:
return self.right(leftval, **kw)
else:
subs = self.right
newargs = list(args)
subinds = []
subvals = []
for key in subs.keys():
if np.issubdtype(type(key), np.integer):
subinds.append(key)
elif isinstance(key, str):
ind = self.left.inputs.index(key)
subinds.append(ind)
subvals.append(subs[key])
# Turn inputs specified in kw into positional indices.
# Names for compound inputs do not propagate to sub models.
kwind = []
kwval = []
for kwkey in list(kw.keys()):
if kwkey in self.inputs:
ind = self.inputs.index(kwkey)
if ind < len(args):
raise ValueError(
"Keyword argument duplicates positional value supplied."
)
kwind.append(ind)
kwval.append(kw[kwkey])
del kw[kwkey]
# Build new argument list
# Append keyword specified args first
if kwind:
kwargs = list(zip(kwind, kwval))
kwargs.sort()
kwindsorted, kwvalsorted = list(zip(*kwargs))
newargs = newargs + list(kwvalsorted)
if subinds:
subargs = list(zip(subinds, subvals))
subargs.sort()
# subindsorted, subvalsorted = list(zip(*subargs))
# The substitutions must be inserted in order
for ind, val in subargs:
newargs.insert(ind, val)
return self.left(*newargs, **kw)
@property
def param_names(self):
"""An ordered list of parameter names."""
return self._param_names
def _make_leaflist(self):
tdict = {}
leaflist = []
make_subtree_dict(self, "", tdict, leaflist)
self._leaflist = leaflist
self._tdict = tdict
def __getattr__(self, name):
"""
If someone accesses an attribute not already defined, map the
parameters, and then see if the requested attribute is one of
the parameters
"""
# The following test is needed to avoid infinite recursion
# caused by deepcopy. There may be other such cases discovered.
if name == "__setstate__":
raise AttributeError
if name in self._param_names:
return self.__dict__[name]
else:
raise AttributeError(f'Attribute "{name}" not found')
def __getitem__(self, index):
if self._leaflist is None:
self._make_leaflist()
leaflist = self._leaflist
tdict = self._tdict
if isinstance(index, slice):
if index.step:
raise ValueError("Steps in slices not supported for compound models")
if index.start is not None:
if isinstance(index.start, str):
start = self._str_index_to_int(index.start)
else:
start = index.start
else:
start = 0
if index.stop is not None:
if isinstance(index.stop, str):
stop = self._str_index_to_int(index.stop)
else:
stop = index.stop - 1
else:
stop = len(leaflist) - 1
if index.stop == 0:
raise ValueError("Slice endpoint cannot be 0")
if start < 0:
start = len(leaflist) + start
if stop < 0:
stop = len(leaflist) + stop
# now search for matching node:
if stop == start: # only single value, get leaf instead in code below
index = start
else:
for key in tdict:
node, leftind, rightind = tdict[key]
if leftind == start and rightind == stop:
return node
raise IndexError("No appropriate subtree matches slice")
if np.issubdtype(type(index), np.integer):
return leaflist[index]
elif isinstance(index, str):
return leaflist[self._str_index_to_int(index)]
else:
raise TypeError("index must be integer, slice, or model name string")
def _str_index_to_int(self, str_index):
# Search through leaflist for item with that name
found = []
for nleaf, leaf in enumerate(self._leaflist):
if getattr(leaf, "name", None) == str_index:
found.append(nleaf)
if len(found) == 0:
raise IndexError(f"No component with name '{str_index}' found")
if len(found) > 1:
raise IndexError(
f"Multiple components found using '{str_index}' as name\n"
f"at indices {found}"
)
return found[0]
@property
def n_inputs(self):
"""The number of inputs of a model."""
return self._n_inputs
@n_inputs.setter
def n_inputs(self, value):
self._n_inputs = value
@property
def n_outputs(self):
"""The number of outputs of a model."""
return self._n_outputs
@n_outputs.setter
def n_outputs(self, value):
self._n_outputs = value
@property
def eqcons(self):
return self._eqcons
@eqcons.setter
def eqcons(self, value):
self._eqcons = value
@property
def ineqcons(self):
return self._ineqcons
@ineqcons.setter
def ineqcons(self, value):
    self._ineqcons = value
def traverse_postorder(self, include_operator=False):
"""Postorder traversal of the CompoundModel tree."""
res = []
if isinstance(self.left, CompoundModel):
res = res + self.left.traverse_postorder(include_operator)
else:
res = res + [self.left]
if isinstance(self.right, CompoundModel):
res = res + self.right.traverse_postorder(include_operator)
else:
res = res + [self.right]
if include_operator:
res.append(self.op)
else:
res.append(self)
return res
def _format_expression(self, format_leaf=None):
leaf_idx = 0
operands = deque()
if format_leaf is None:
format_leaf = lambda i, l: f"[{i}]"
for node in self.traverse_postorder():
if not isinstance(node, CompoundModel):
operands.append(format_leaf(leaf_idx, node))
leaf_idx += 1
continue
right = operands.pop()
left = operands.pop()
if node.op in OPERATOR_PRECEDENCE:
oper_order = OPERATOR_PRECEDENCE[node.op]
if isinstance(node, CompoundModel):
if (
isinstance(node.left, CompoundModel)
and OPERATOR_PRECEDENCE[node.left.op] < oper_order
):
left = f"({left})"
if (
isinstance(node.right, CompoundModel)
and OPERATOR_PRECEDENCE[node.right.op] < oper_order
):
right = f"({right})"
operands.append(" ".join((left, node.op, right)))
else:
left = f"(({left}),"
right = f"({right}))"
operands.append(" ".join((node.op[0], left, right)))
return "".join(operands)
def _format_components(self):
if self._parameters_ is None:
self._map_parameters()
return "\n\n".join(f"[{idx}]: {m!r}" for idx, m in enumerate(self._leaflist))
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
("Expression", expression),
("Components", "\n" + indent(components)),
]
return super()._format_str(keywords=keywords)
def rename(self, name):
self.name = name
return self
@property
def isleaf(self):
return False
@property
def inverse(self):
if self.op == "|":
return self.right.inverse | self.left.inverse
elif self.op == "&":
return self.left.inverse & self.right.inverse
else:
return NotImplemented
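# Illustrative sketch: for a pipeline ``m = m1 | m2`` (hypothetical models with
# defined inverses), ``m.inverse`` evaluates as ``m2.inverse | m1.inverse``, i.e. the
# right-most model is undone first; ``&`` maps to ``m.left.inverse & m.right.inverse``
# and all other operators return NotImplemented.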
@property
def fittable(self):
"""Set the fittable attribute on a compound model."""
if self._fittable is None:
if self._leaflist is None:
self._map_parameters()
self._fittable = all(m.fittable for m in self._leaflist)
return self._fittable
__add__ = _model_oper("+")
__sub__ = _model_oper("-")
__mul__ = _model_oper("*")
__truediv__ = _model_oper("/")
__pow__ = _model_oper("**")
__or__ = _model_oper("|")
__and__ = _model_oper("&")
def _map_parameters(self):
"""
Map all the constituent model parameters to the compound object,
renaming as necessary by appending a suffix number.
This can be an expensive operation, particularly for a complex
expression tree.
All the corresponding parameter attributes are created that one
expects for the Model class.
The parameter objects that the attributes point to are the same
objects as in the constituent models. Changes made to parameter
values in either are seen by both.
Prior to calling this, none of the associated attributes will
exist. This method must be called to make the model usable by
fitting engines.
If oldnames=True, then parameters are named as in the original
implementation of compound models.
"""
if self._parameters is not None:
# do nothing
return
if self._leaflist is None:
self._make_leaflist()
self._parameters_ = {}
param_map = {}
self._param_names = []
for lindex, leaf in enumerate(self._leaflist):
if not isinstance(leaf, dict):
for param_name in leaf.param_names:
param = getattr(leaf, param_name)
new_param_name = f"{param_name}_{lindex}"
self.__dict__[new_param_name] = param
self._parameters_[new_param_name] = param
self._param_names.append(new_param_name)
param_map[new_param_name] = (lindex, param_name)
self._param_metrics = {}
self._param_map = param_map
self._param_map_inverse = {v: k for k, v in param_map.items()}
self._initialize_slices()
self._param_names = tuple(self._param_names)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name] = {}
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
@staticmethod
def _recursive_lookup(branch, adict, key):
if isinstance(branch, CompoundModel):
return adict[key]
return branch, key
def inputs_map(self):
"""
Map the names of the inputs of this CompoundModel to the inputs of the leaf models.
"""
inputs_map = {}
if not isinstance(
self.op, str
): # If we don't have an operator the mapping is trivial
return {inp: (self, inp) for inp in self.inputs}
elif self.op == "|":
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
elif self.op == "&":
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
if isinstance(self.right, CompoundModel):
r_inputs_map = self.right.inputs_map()
for i, inp in enumerate(self.inputs):
if i < len(self.left.inputs): # Get from left
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[self.left.inputs[i]]
else:
inputs_map[inp] = self.left, self.left.inputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
inputs_map[inp] = r_inputs_map[
self.right.inputs[i - len(self.left.inputs)]
]
else:
inputs_map[inp] = (
self.right,
self.right.inputs[i - len(self.left.inputs)],
)
elif self.op == "fix_inputs":
fixed_ind = list(self.right.keys())
ind = [
list(self.left.inputs).index(i) if isinstance(i, str) else i
for i in fixed_ind
]
inp_ind = list(range(self.left.n_inputs))
for i in ind:
inp_ind.remove(i)
for i in inp_ind:
inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i]
else:
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.left.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
return inputs_map
def _parameter_units_for_data_units(self, input_units, output_units):
if self._leaflist is None:
self._map_parameters()
units_for_data = {}
for imodel, model in enumerate(self._leaflist):
units_for_data_leaf = model._parameter_units_for_data_units(
input_units, output_units
)
for param_leaf in units_for_data_leaf:
param = self._param_map_inverse[(imodel, param_leaf)]
units_for_data[param] = units_for_data_leaf[param_leaf]
return units_for_data
@property
def input_units(self):
inputs_map = self.inputs_map()
input_units_dict = {
key: inputs_map[key][0].input_units[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units is not None
}
if input_units_dict:
return input_units_dict
return None
@property
def input_units_equivalencies(self):
inputs_map = self.inputs_map()
input_units_equivalencies_dict = {
key: inputs_map[key][0].input_units_equivalencies[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units_equivalencies is not None
}
if not input_units_equivalencies_dict:
return None
return input_units_equivalencies_dict
@property
def input_units_allow_dimensionless(self):
inputs_map = self.inputs_map()
return {
key: inputs_map[key][0].input_units_allow_dimensionless[orig_key]
for key, (mod, orig_key) in inputs_map.items()
}
@property
def input_units_strict(self):
inputs_map = self.inputs_map()
return {
key: inputs_map[key][0].input_units_strict[orig_key]
for key, (mod, orig_key) in inputs_map.items()
}
@property
def return_units(self):
outputs_map = self.outputs_map()
return {
key: outputs_map[key][0].return_units[orig_key]
for key, (mod, orig_key) in outputs_map.items()
if outputs_map[key][0].return_units is not None
}
def outputs_map(self):
"""
Map the names of the outputs of this CompoundModel to the outputs of the leaf models.
"""
outputs_map = {}
if not isinstance(
self.op, str
): # If we don't have an operator the mapping is trivial
return {out: (self, out) for out in self.outputs}
elif self.op == "|":
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for out in self.outputs:
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[out]
else:
outputs_map[out] = self.right, out
elif self.op == "&":
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for i, out in enumerate(self.outputs):
if i < len(self.left.outputs): # Get from left
if isinstance(self.left, CompoundModel):
outputs_map[out] = l_outputs_map[self.left.outputs[i]]
else:
outputs_map[out] = self.left, self.left.outputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[
self.right.outputs[i - len(self.left.outputs)]
]
else:
outputs_map[out] = (
self.right,
self.right.outputs[i - len(self.left.outputs)],
)
elif self.op == "fix_inputs":
return self.left.outputs_map()
else:
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
for out in self.left.outputs:
if isinstance(self.left, CompoundModel):
outputs_map[out] = l_outputs_map[out]
else:
outputs_map[out] = self.left, out
return outputs_map
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be
passed.
Raises
------
ValueError
If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = self.get_bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or out must be input.")
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError("inconsistent shape of the output.")
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError(
"the array and model must have the same number of dimensions."
)
if bbox is not None:
# Assures position is at center pixel, important when using
# add_array.
pd = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array(
[extract_array(c, sub_shape, pos) for c in coords]
)
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input out in "
"one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
def replace_submodel(self, name, model):
"""
Construct a new `~astropy.modeling.CompoundModel` instance from an
existing CompoundModel, replacing the named submodel with a new model.
In order to ensure that inverses and names are kept/reconstructed, it's
necessary to rebuild the CompoundModel from the replaced node all the
way back to the base. The original CompoundModel is left untouched.
Parameters
----------
name : str
name of submodel to be replaced
model : `~astropy.modeling.Model`
replacement model
"""
submodels = [
m for m in self.traverse_postorder() if getattr(m, "name", None) == name
]
if submodels:
if len(submodels) > 1:
raise ValueError(f"More than one submodel named {name}")
old_model = submodels.pop()
if len(old_model) != len(model):
raise ValueError(
"New and old models must have equal values for n_models"
)
# Do this check first in order to raise a more helpful Exception,
# although it would fail trying to construct the new CompoundModel
if (
old_model.n_inputs != model.n_inputs
or old_model.n_outputs != model.n_outputs
):
raise ValueError(
"New model must match numbers of inputs and "
"outputs of existing model"
)
tree = _get_submodel_path(self, name)
while tree:
branch = self.copy()
for node in tree[:-1]:
branch = getattr(branch, node)
setattr(branch, tree[-1], model)
model = CompoundModel(
branch.op, branch.left, branch.right, name=branch.name
)
tree = tree[:-1]
return model
else:
raise ValueError(f"No submodels found named {name}")
def _set_sub_models_and_parameter_units(self, left, right):
"""
Provides a work-around to properly set the sub-models and their respective
parameters' units/values when using the ``without_units_for_data``
or ``with_units_from_data`` methods.
"""
model = CompoundModel(self.op, left, right)
self.left = left
self.right = right
for name in model.param_names:
model_parameter = getattr(model, name)
parameter = getattr(self, name)
parameter.value = model_parameter.value
parameter._set_unit(model_parameter.unit, force=True)
def without_units_for_data(self, **kwargs):
"""
See `~astropy.modeling.Model.without_units_for_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
units. This is only valid for compound models built with the ``*`` and ``/``
operators, since in that case it is reasonable to mix the output units. It does
this by modifying the output units of each sub-model using the output units of
the other sub-model, so that the original method can be applied and the desired
result obtained.
Additional data has to be output in the mixed output unit case
so that the units can be properly rebuilt by
`~astropy.modeling.CompoundModel.with_units_from_data`.
Outside of the mixed output units case, this method is identical to the
base method.
"""
if self.op in ["*", "/"]:
model = self.copy()
inputs = {inp: kwargs[inp] for inp in self.inputs}
left_units = self.left.output_units(**kwargs)
right_units = self.right.output_units(**kwargs)
if self.op == "*":
left_kwargs = {
out: kwargs[out] / right_units[out]
for out in self.left.outputs
if kwargs[out] is not None
}
right_kwargs = {
out: kwargs[out] / left_units[out]
for out in self.right.outputs
if kwargs[out] is not None
}
else:
left_kwargs = {
out: kwargs[out] * right_units[out]
for out in self.left.outputs
if kwargs[out] is not None
}
right_kwargs = {
out: 1 / kwargs[out] * left_units[out]
for out in self.right.outputs
if kwargs[out] is not None
}
left_kwargs.update(inputs.copy())
right_kwargs.update(inputs.copy())
left = self.left.without_units_for_data(**left_kwargs)
if isinstance(left, tuple):
left_kwargs["_left_kwargs"] = left[1]
left_kwargs["_right_kwargs"] = left[2]
left = left[0]
right = self.right.without_units_for_data(**right_kwargs)
if isinstance(right, tuple):
right_kwargs["_left_kwargs"] = right[1]
right_kwargs["_right_kwargs"] = right[2]
right = right[0]
model._set_sub_models_and_parameter_units(left, right)
return model, left_kwargs, right_kwargs
else:
return super().without_units_for_data(**kwargs)
def with_units_from_data(self, **kwargs):
"""
See `~astropy.modeling.Model.with_units_from_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
units. This is only valid for compound models built with the ``*`` and ``/``
operators, since in that case it is reasonable to mix the output units. In order
to do this it requires some additional information output by
`~astropy.modeling.CompoundModel.without_units_for_data`, passed as
keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``.
Outside of the mixed output units case, this method is identical to the
base method.
"""
if self.op in ["*", "/"]:
left_kwargs = kwargs.pop("_left_kwargs")
right_kwargs = kwargs.pop("_right_kwargs")
left = self.left.with_units_from_data(**left_kwargs)
right = self.right.with_units_from_data(**right_kwargs)
model = self.copy()
model._set_sub_models_and_parameter_units(left, right)
return model
else:
return super().with_units_from_data(**kwargs)
def _get_submodel_path(model, name):
"""Find the route down a CompoundModel's tree to the model with the
specified name (whether it's a leaf or not)"""
if getattr(model, "name", None) == name:
return []
try:
return ["left"] + _get_submodel_path(model.left, name)
except (AttributeError, TypeError):
pass
try:
return ["right"] + _get_submodel_path(model.right, name)
except (AttributeError, TypeError):
pass
def binary_operation(binoperator, left, right):
"""
Perform binary operation. Operands may be matching tuples of operands.
"""
if isinstance(left, tuple) and isinstance(right, tuple):
return tuple(binoperator(item[0], item[1]) for item in zip(left, right))
return binoperator(left, right)
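# Illustrative sketch: binary_operation(operator.add, 1.0, 2.0) returns 3.0, while
# binary_operation(operator.add, (1.0, 2.0), (3.0, 4.0)) applies the operator
# element-wise and returns (4.0, 6.0).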
def get_ops(tree, opset):
"""
Recursive function to collect operators used.
"""
if isinstance(tree, CompoundModel):
opset.add(tree.op)
get_ops(tree.left, opset)
get_ops(tree.right, opset)
else:
return
def make_subtree_dict(tree, nodepath, tdict, leaflist):
"""
Traverse a tree noting each node by a key that indicates all the
left/right choices necessary to reach that node. Each key will
reference a tuple that contains:
- reference to the compound model for that node.
- left most index contained within that subtree
(relative to all indices for the whole tree)
- right most index contained within that subtree
"""
# if this is a leaf, just append it to the leaflist
if not hasattr(tree, "isleaf"):
leaflist.append(tree)
else:
leftmostind = len(leaflist)
make_subtree_dict(tree.left, nodepath + "l", tdict, leaflist)
make_subtree_dict(tree.right, nodepath + "r", tdict, leaflist)
rightmostind = len(leaflist) - 1
tdict[nodepath] = (tree, leftmostind, rightmostind)
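# Illustrative sketch (hypothetical leaf models m1, m2, m3): for the compound model
# (m1 + m2) | m3 this produces leaflist == [m1, m2, m3] and tdict keys "" (the full
# tree, spanning leaves 0-2) and "l" (the m1 + m2 subtree, spanning leaves 0-1).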
_ORDER_OF_OPERATORS = [("fix_inputs",), ("|",), ("&",), ("+", "-"), ("*", "/"), ("**",)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
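# The resulting precedence mapping is:
# {"fix_inputs": 0, "|": 1, "&": 2, "+": 3, "-": 3, "*": 4, "/": 4, "**": 5}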
def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None):
"""
This function creates a compound model with one or more of the input
values of the input model assigned fixed values (scalar or array).
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that one or more of the
model input values will be fixed to some constant value.
values : dict
A dictionary where the key identifies which input to fix
and its value is the value to fix it at. The key may either be the
name of the input or a number reflecting its order in the inputs.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> g = Gaussian2D(1, 2, 3, 4, 5)
>>> gv = fix_inputs(g, {0: 2.5})
Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y)
"""
model = CompoundModel("fix_inputs", modelinstance, values)
if bounding_boxes is not None:
if selector_args is None:
selector_args = tuple((key, True) for key in values.keys())
bbox = CompoundBoundingBox.validate(
modelinstance, bounding_boxes, selector_args
)
_selector = bbox.selector_args.get_fixed_values(modelinstance, values)
new_bbox = bbox[_selector]
new_bbox = new_bbox.__class__.validate(model, new_bbox)
model.bounding_box = new_bbox
return model
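# Illustrative sketch: continuing the docstring example above, the returned model has
# a single remaining input, so ``gv(3.0)`` evaluates the original model at the fixed
# value, i.e. it equals ``g(2.5, 3.0)``.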
def bind_bounding_box(modelinstance, bounding_box, ignored=None, order="C"):
"""
Set a validated bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated bounding box will be set on.
bounding_box : tuple
A bounding box tuple, see :ref:`astropy:bounding-boxes` for details
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = ModelBoundingBox.validate(
modelinstance, bounding_box, ignored=ignored, order=order
)
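# Illustrative usage sketch (model name assumed for the example):
#
#     from astropy.modeling.models import Gaussian1D
#     g = Gaussian1D(amplitude=1, mean=0, stddev=1)
#     bind_bounding_box(g, (-5, 5))
#
# After this call ``g.bounding_box`` is a validated ModelBoundingBox restricting
# evaluation with ``with_bounding_box=True`` to the interval [-5, 5] on input "x".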
def bind_compound_bounding_box(
modelinstance,
bounding_boxes,
selector_args,
create_selector=None,
ignored=None,
order="C",
):
"""
Add a validated compound bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated compound bounding box will be set on.
bounding_boxes : dict
A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`
for details.
selector_args : list
List of selector argument tuples to define selection for compound
bounding box, see :ref:`astropy:bounding-boxes` for details.
create_selector : callable, optional
An optional callable with interface (selector_value, model) which
can generate a bounding box based on a selector value and model if
there is no bounding box in the compound bounding box listed under
that selector value. Default is ``None``, meaning new bounding
box entries will not be automatically generated.
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = CompoundBoundingBox.validate(
modelinstance,
bounding_boxes,
selector_args,
create_selector=create_selector,
ignored=ignored,
order=order,
)
def custom_model(*args, fit_deriv=None):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
The model is separable only if there is a single input.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
The standard settable model properties can be configured by default
using keyword arguments matching the name of the property; however,
these values are not set as model "parameters". Moreover, users
cannot use keyword arguments matching non-settable model properties,
with the exception of ``n_outputs`` which should be set to the number of
outputs of your function.
Parameters
----------
func : function
Function which defines the model. It should take N positional
arguments where ``N`` is the number of dimensions of the model (the number of
independent variables in the model), and any number of keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25)
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
f"{__name__} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any)."
)
def _custom_model_inputs(func):
"""
Processes the inputs to the `custom_model`'s function into the appropriate
categories.
Parameters
----------
func : callable
Returns
-------
inputs : list
list of evaluation inputs
special_params : dict
dictionary of model properties which require special treatment
settable_params : dict
dictionary of defaults for settable model properties
params : dict
dictionary of model parameters set by `custom_model`'s function
"""
inputs, parameters = get_inputs_and_params(func)
special = ["n_outputs"]
settable = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is not None
]
properties = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is None and attr not in special
]
special_params = {}
settable_params = {}
params = {}
for param in parameters:
if param.name in special:
special_params[param.name] = param.default
elif param.name in settable:
settable_params[param.name] = param.default
elif param.name in properties:
raise ValueError(
f"Parameter '{param.name}' cannot be a model property: {properties}."
)
else:
params[param.name] = param.default
return inputs, special_params, settable_params, params
def _custom_model_wrapper(func, fit_deriv=None):
"""
Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable object"
)
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other callable object"
)
model_name = func.__name__
inputs, special_params, settable_params, params = _custom_model_inputs(func)
if fit_deriv is not None and len(fit_deriv.__defaults__) != len(params):
raise ModelDefinitionError(
"derivative function should accept same number of parameters as func."
)
params = {
param: Parameter(param, default=default) for param, default in params.items()
}
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
members = {
"__module__": str(modname),
"__doc__": func.__doc__,
"n_inputs": len(inputs),
"n_outputs": special_params.pop("n_outputs", 1),
"evaluate": staticmethod(func),
"_settable_properties": settable_params,
}
if fit_deriv is not None:
members["fit_deriv"] = staticmethod(fit_deriv)
members.update(params)
cls = type(model_name, (FittableModel,), members)
cls._separable = len(inputs) == 1
return cls
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``arr[coords] == arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from
``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = model.bounding_box
if (coords is None) and (arr is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or arr must be input.")
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError(
"number of array dimensions inconsistent with number of model inputs."
)
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError(
"coordinate length inconsistent with the number of model inputs."
)
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError("coordinate shape inconsistent with the array shape.")
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = pos, delta = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input"
" arr in one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
def hide_inverse(model):
"""
This is a convenience function intended to disable automatic generation
of the inverse in compound models by disabling one of the constituent
model's inverse. This is to handle cases where user provided inverse
functions are not compatible within an expression.
Example:
compound_model.inverse = hide_inverse(m1) + m2 + m3
This will ensure that the defined inverse itself won't attempt to
build its own inverse, which would otherwise fail in this example
(e.g., m = m1 + m2 + m3 happens to raise an exception for this
reason).
Note that this permanently disables it. To prevent that either copy
the model or restore the inverse later.
"""
del model.inverse
return model
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains an improved bounding box.
"""
import abc
import copy
import warnings
from collections import namedtuple
from typing import Any, Callable, Dict, List, Tuple
import numpy as np
from astropy.units import Quantity
from astropy.utils import isiterable
__all__ = ["ModelBoundingBox", "CompoundBoundingBox"]
_BaseInterval = namedtuple("_BaseInterval", "lower upper")
class _Interval(_BaseInterval):
"""
A single input's bounding box interval.
Parameters
----------
lower : float
The lower bound of the interval
upper : float
The upper bound of the interval
Methods
-------
validate :
Constructs a valid interval
outside :
Determine which parts of an input array are outside the interval.
domain :
Constructs a discretization of the points inside the interval.
"""
def __repr__(self):
return f"Interval(lower={self.lower}, upper={self.upper})"
def copy(self):
return copy.deepcopy(self)
@staticmethod
def _validate_shape(interval):
"""Validate the shape of an interval representation"""
MESSAGE = """An interval must be some sort of sequence of length 2"""
try:
shape = np.shape(interval)
except TypeError:
try:
# np.shape does not work with lists of Quantities
if len(interval) == 1:
interval = interval[0]
shape = np.shape([b.to_value() for b in interval])
except (ValueError, TypeError, AttributeError):
raise ValueError(MESSAGE)
valid_shape = shape in ((2,), (1, 2), (2, 0))
if not valid_shape:
valid_shape = (
len(shape) > 0
and shape[0] == 2
and all(isinstance(b, np.ndarray) for b in interval)
)
if not isiterable(interval) or not valid_shape:
raise ValueError(MESSAGE)
@classmethod
def _validate_bounds(cls, lower, upper):
"""Validate the bounds are reasonable and construct an interval from them."""
if (np.asanyarray(lower) > np.asanyarray(upper)).all():
warnings.warn(
f"Invalid interval: upper bound {upper} "
f"is strictly less than lower bound {lower}.",
RuntimeWarning,
)
return cls(lower, upper)
@classmethod
def validate(cls, interval):
"""
Construct and validate an interval
Parameters
----------
interval : iterable
A representation of the interval.
Returns
-------
A validated interval.
"""
cls._validate_shape(interval)
if len(interval) == 1:
interval = tuple(interval[0])
else:
interval = tuple(interval)
return cls._validate_bounds(interval[0], interval[1])
def outside(self, _input: np.ndarray):
"""
Parameters
----------
_input : np.ndarray
The evaluation input in the form of an array.
Returns
-------
Boolean array indicating which parts of _input are outside the interval:
True -> position outside interval
False -> position inside interval
"""
return np.logical_or(_input < self.lower, _input > self.upper)
def domain(self, resolution):
return np.arange(self.lower, self.upper + resolution, resolution)
# The interval where all ignored inputs can be found.
_ignored_interval = _Interval.validate((-np.inf, np.inf))
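# Illustrative usage sketch for _Interval (the annotated results are
# approximate and only meant to make the semantics concrete):
#
#     >>> interval = _Interval.validate((1.0, 4.0))
#     >>> interval.outside(np.array([0.5, 2.0, 5.0]))   # -> [True, False, True]
#     >>> interval.domain(1.0)                          # -> [1., 2., 3., 4.]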
def get_index(model, key) -> int:
"""
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself.
"""
if isinstance(key, str):
if key in model.inputs:
index = model.inputs.index(key)
else:
raise ValueError(f"'{key}' is not one of the inputs: {model.inputs}.")
elif np.issubdtype(type(key), np.integer):
if 0 <= key < len(model.inputs):
index = key
else:
raise IndexError(
f"Integer key: {key} must be non-negative and < {len(model.inputs)}."
)
else:
raise ValueError(f"Key value: {key} must be string or integer.")
return index
def get_name(model, index: int):
"""Get the input name corresponding to the input index"""
return model.inputs[index]
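# Illustrative usage sketch for get_index()/get_name(), assuming a two-input
# model such as astropy.modeling.models.Gaussian2D with inputs ('x', 'y'):
#
#     >>> from astropy.modeling.models import Gaussian2D
#     >>> model = Gaussian2D()
#     >>> get_index(model, "y")   # -> 1
#     >>> get_index(model, 0)     # -> 0
#     >>> get_name(model, 1)      # -> 'y'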
class _BoundingDomain(abc.ABC):
"""
Base class for ModelBoundingBox and CompoundBoundingBox.
This is where all the `~astropy.modeling.core.Model` evaluation
code for evaluating with a bounding box is because it is common
to both types of bounding box.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this bounding domain is for.
prepare_inputs :
        Generates the necessary input information so that the model can
        be evaluated only for input points entirely inside the bounding_box.
This needs to be implemented by a subclass. Note that most of
the implementation is in ModelBoundingBox.
prepare_outputs :
Fills the output values in for any input points outside the
bounding_box.
evaluate :
Performs a complete model evaluation while enforcing the bounds
on the inputs and returns a complete output.
"""
def __init__(self, model, ignored: List[int] = None, order: str = "C"):
self._model = model
self._ignored = self._validate_ignored(ignored)
self._order = self._get_order(order)
@property
def model(self):
return self._model
@property
def order(self) -> str:
return self._order
@property
def ignored(self) -> List[int]:
return self._ignored
def _get_order(self, order: str = None) -> str:
"""
        Determine whether the bounding_box is C/Python ordered or
        Fortran/mathematically ordered.
"""
if order is None:
order = self._order
if order not in ("C", "F"):
raise ValueError(
"order must be either 'C' (C/python order) or "
f"'F' (Fortran/mathematical order), got: {order}."
)
return order
def _get_index(self, key) -> int:
"""
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself.
"""
return get_index(self._model, key)
def _get_name(self, index: int):
"""Get the input name corresponding to the input index"""
return get_name(self._model, index)
@property
def ignored_inputs(self) -> List[str]:
return [self._get_name(index) for index in self._ignored]
def _validate_ignored(self, ignored: list) -> List[int]:
if ignored is None:
return []
else:
return [self._get_index(key) for key in ignored]
def __call__(self, *args, **kwargs):
raise NotImplementedError(
"This bounding box is fixed by the model and does not have "
"adjustable parameters."
)
@abc.abstractmethod
def fix_inputs(self, model, fixed_inputs: dict):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
"""
raise NotImplementedError("This should be implemented by a child class.")
@abc.abstractmethod
def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]:
"""
        Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
raise NotImplementedError("This has not been implemented for BoundingDomain.")
@staticmethod
def _base_output(input_shape, fill_value):
"""
Create a baseline output, assuming that the entire input is outside
the bounding box
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
An array of the correct shape containing all fill_value
"""
return np.zeros(input_shape) + fill_value
def _all_out_output(self, input_shape, fill_value):
"""
Create output if all inputs are outside the domain
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
        A full set of outputs for the case that all inputs are outside the domain.
"""
return [
self._base_output(input_shape, fill_value)
for _ in range(self._model.n_outputs)
], None
def _modify_output(self, valid_output, valid_index, input_shape, fill_value):
"""
For a single output fill in all the parts corresponding to inputs
outside the bounding box.
Parameters
----------
valid_output : numpy array
The output from the model corresponding to inputs inside the
bounding box
valid_index : numpy array
array of all indices of inputs inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
An output array with all the indices corresponding to inputs
outside the bounding box filled in by fill_value
"""
output = self._base_output(input_shape, fill_value)
if not output.shape:
output = np.array(valid_output)
else:
output[valid_index] = valid_output
if np.isscalar(valid_output):
output = output.item(0)
return output
def _prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value):
"""
Fill in all the outputs of the model corresponding to inputs
outside the bounding_box.
Parameters
----------
valid_outputs : list of numpy array
The list of outputs from the model corresponding to inputs
inside the bounding box
valid_index : numpy array
array of all indices of inputs inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
List of filled in output arrays.
"""
outputs = []
for valid_output in valid_outputs:
outputs.append(
self._modify_output(valid_output, valid_index, input_shape, fill_value)
)
return outputs
def prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value):
"""
Fill in all the outputs of the model corresponding to inputs
outside the bounding_box, adjusting any single output model so that
        its output becomes a list containing that output.
Parameters
----------
valid_outputs : list
The list of outputs from the model corresponding to inputs
inside the bounding box
valid_index : array_like
array of all indices of inputs inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
"""
if self._model.n_outputs == 1:
valid_outputs = [valid_outputs]
return self._prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
@staticmethod
def _get_valid_outputs_unit(valid_outputs, with_units: bool):
"""
Get the unit for outputs if one is required.
Parameters
----------
valid_outputs : list of numpy array
The list of outputs from the model corresponding to inputs
inside the bounding box
with_units : bool
whether or not a unit is required
"""
if with_units:
return getattr(valid_outputs, "unit", None)
def _evaluate_model(
self,
evaluate: Callable,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units: bool,
):
"""
Evaluate the model using the given evaluate routine
Parameters
----------
evaluate : Callable
callable which takes in the valid inputs to evaluate model
valid_inputs : list of numpy arrays
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : numpy array
array of all indices inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
with_units : bool
whether or not a unit is required
Returns
-------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
"""
valid_outputs = evaluate(valid_inputs)
valid_outputs_unit = self._get_valid_outputs_unit(valid_outputs, with_units)
return (
self.prepare_outputs(valid_outputs, valid_index, input_shape, fill_value),
valid_outputs_unit,
)
def _evaluate(
self, evaluate: Callable, inputs, input_shape, fill_value, with_units: bool
):
"""
Perform model evaluation steps:
prepare_inputs -> evaluate -> prepare_outputs
Parameters
----------
evaluate : Callable
callable which takes in the valid inputs to evaluate model
        inputs : list of numpy arrays
            List of all the model evaluation inputs
        input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
with_units : bool
whether or not a unit is required
Returns
-------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
"""
valid_inputs, valid_index, all_out = self.prepare_inputs(input_shape, inputs)
if all_out:
return self._all_out_output(input_shape, fill_value)
else:
return self._evaluate_model(
evaluate, valid_inputs, valid_index, input_shape, fill_value, with_units
)
@staticmethod
def _set_outputs_unit(outputs, valid_outputs_unit):
"""
Set the units on the outputs
prepare_inputs -> evaluate -> prepare_outputs -> set output units
Parameters
----------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
Returns
-------
List containing filled in output values and units
"""
if valid_outputs_unit is not None:
return Quantity(outputs, valid_outputs_unit, copy=False, subok=True)
return outputs
def evaluate(self, evaluate: Callable, inputs, fill_value):
"""
Perform full model evaluation steps:
prepare_inputs -> evaluate -> prepare_outputs -> set output units
Parameters
----------
evaluate : callable
callable which takes in the valid inputs to evaluate model
        inputs : list
            List of all the model evaluation inputs
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
"""
input_shape = self._model.input_shape(inputs)
# NOTE: CompoundModel does not currently support units during
# evaluation for bounding_box so this feature is turned off
# for CompoundModel(s).
outputs, valid_outputs_unit = self._evaluate(
evaluate, inputs, input_shape, fill_value, self._model.bbox_with_units
)
return tuple(self._set_outputs_unit(outputs, valid_outputs_unit))
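# Illustrative sketch of the evaluation flow implemented above: this is what
# powers ``model(..., with_bounding_box=True)``. Assuming a Gaussian1D with a
# bounding box, points outside the box come back as the fill value (NaN by
# default), while points inside are evaluated normally:
#
#     >>> from astropy.modeling.models import Gaussian1D
#     >>> g = Gaussian1D(amplitude=1, mean=0, stddev=1)
#     >>> g.bounding_box = (-2, 2)
#     >>> g([-5.0, 0.0, 5.0], with_bounding_box=True)   # -> approx. [nan, 1., nan]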
class ModelBoundingBox(_BoundingDomain):
"""
A model's bounding box
Parameters
----------
intervals : dict
A dictionary containing all the intervals for each model input
keys -> input index
values -> interval for that index
model : `~astropy.modeling.Model`
The Model this bounding_box is for.
ignored : list
A list containing all the inputs (index) which will not be
checked for whether or not their elements are in/out of an interval.
order : optional, str
The ordering that is assumed for the tuple representation of this
bounding_box. Options: 'C': C/Python order, e.g. z, y, x.
(default), 'F': Fortran/mathematical notation order, e.g. x, y, z.
"""
def __init__(
self,
intervals: Dict[int, _Interval],
model,
ignored: List[int] = None,
order: str = "C",
):
super().__init__(model, ignored, order)
self._intervals = {}
if intervals != () and intervals != {}:
self._validate(intervals, order=order)
def copy(self, ignored=None):
intervals = {
index: interval.copy() for index, interval in self._intervals.items()
}
if ignored is None:
ignored = self._ignored.copy()
return ModelBoundingBox(
intervals, self._model, ignored=ignored, order=self._order
)
@property
def intervals(self) -> Dict[int, _Interval]:
"""Return bounding_box labeled using input positions"""
return self._intervals
@property
def named_intervals(self) -> Dict[str, _Interval]:
"""Return bounding_box labeled using input names"""
return {self._get_name(index): bbox for index, bbox in self._intervals.items()}
def __repr__(self):
parts = ["ModelBoundingBox(", " intervals={"]
for name, interval in self.named_intervals.items():
parts.append(f" {name}: {interval}")
parts.append(" }")
if len(self._ignored) > 0:
parts.append(f" ignored={self.ignored_inputs}")
parts.append(
f" model={self._model.__class__.__name__}(inputs={self._model.inputs})"
)
parts.append(f" order='{self._order}'")
parts.append(")")
return "\n".join(parts)
def __len__(self):
return len(self._intervals)
def __contains__(self, key):
try:
            index = self._get_index(key)
            return index in self._intervals or index in self._ignored
except (IndexError, ValueError):
return False
def has_interval(self, key):
return self._get_index(key) in self._intervals
def __getitem__(self, key):
"""Get bounding_box entries by either input name or input index"""
index = self._get_index(key)
if index in self._ignored:
return _ignored_interval
else:
return self._intervals[self._get_index(key)]
def bounding_box(self, order: str = None):
"""
Return the old tuple of tuples representation of the bounding_box
order='C' corresponds to the old bounding_box ordering
order='F' corresponds to the gwcs bounding_box ordering.
"""
if len(self._intervals) == 1:
return tuple(list(self._intervals.values())[0])
else:
order = self._get_order(order)
inputs = self._model.inputs
if order == "C":
inputs = inputs[::-1]
bbox = tuple(tuple(self[input_name]) for input_name in inputs)
if len(bbox) == 1:
bbox = bbox[0]
return bbox
def __eq__(self, value):
"""Note equality can be either with old representation or new one."""
if isinstance(value, tuple):
return self.bounding_box() == value
elif isinstance(value, ModelBoundingBox):
return (self.intervals == value.intervals) and (
self.ignored == value.ignored
)
else:
return False
def __setitem__(self, key, value):
"""Validate and store interval under key (input index or input name)."""
index = self._get_index(key)
if index in self._ignored:
self._ignored.remove(index)
self._intervals[index] = _Interval.validate(value)
def __delitem__(self, key):
"""Delete stored interval"""
index = self._get_index(key)
if index in self._ignored:
raise RuntimeError(f"Cannot delete ignored input: {key}!")
del self._intervals[index]
self._ignored.append(index)
def _validate_dict(self, bounding_box: dict):
"""Validate passing dictionary of intervals and setting them."""
for key, value in bounding_box.items():
self[key] = value
@property
def _available_input_index(self):
model_input_index = [self._get_index(_input) for _input in self._model.inputs]
return [_input for _input in model_input_index if _input not in self._ignored]
def _validate_sequence(self, bounding_box, order: str = None):
"""
Validate passing tuple of tuples representation (or related) and setting them.
"""
order = self._get_order(order)
if order == "C":
# If bounding_box is C/python ordered, it needs to be reversed
# to be in Fortran/mathematical/input order.
bounding_box = bounding_box[::-1]
for index, value in enumerate(bounding_box):
self[self._available_input_index[index]] = value
@property
def _n_inputs(self) -> int:
n_inputs = self._model.n_inputs - len(self._ignored)
if n_inputs > 0:
return n_inputs
else:
return 0
def _validate_iterable(self, bounding_box, order: str = None):
"""Validate and set any iterable representation"""
if len(bounding_box) != self._n_inputs:
raise ValueError(
f"Found {len(bounding_box)} intervals, "
f"but must have exactly {self._n_inputs}."
)
if isinstance(bounding_box, dict):
self._validate_dict(bounding_box)
else:
self._validate_sequence(bounding_box, order)
def _validate(self, bounding_box, order: str = None):
"""Validate and set any representation"""
if self._n_inputs == 1 and not isinstance(bounding_box, dict):
self[self._available_input_index[0]] = bounding_box
else:
self._validate_iterable(bounding_box, order)
@classmethod
def validate(
cls,
model,
bounding_box,
ignored: list = None,
order: str = "C",
_preserve_ignore: bool = False,
**kwargs,
):
"""
Construct a valid bounding box for a model.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be a bounding_box
bounding_box : dict, tuple
A possible representation of the bounding box
order : optional, str
The order that a tuple representation will be assumed to be
Default: 'C'
"""
if isinstance(bounding_box, ModelBoundingBox):
order = bounding_box.order
if _preserve_ignore:
ignored = bounding_box.ignored
bounding_box = bounding_box.named_intervals
new = cls({}, model, ignored=ignored, order=order)
new._validate(bounding_box)
return new
def fix_inputs(self, model, fixed_inputs: dict, _keep_ignored=False):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
        _keep_ignored : bool
Keep the ignored inputs of the bounding box (internal argument only)
"""
new = self.copy()
for _input in fixed_inputs.keys():
del new[_input]
if _keep_ignored:
ignored = new.ignored
else:
ignored = None
return ModelBoundingBox.validate(
model, new.named_intervals, ignored=ignored, order=new._order
)
@property
def dimension(self):
return len(self)
def domain(self, resolution, order: str = None):
inputs = self._model.inputs
order = self._get_order(order)
if order == "C":
inputs = inputs[::-1]
return [self[input_name].domain(resolution) for input_name in inputs]
def _outside(self, input_shape, inputs):
"""
Get all the input positions which are outside the bounding_box,
so that the corresponding outputs can be filled with the fill
value (default NaN).
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
outside_index : bool-numpy array
True -> position outside bounding_box
False -> position inside bounding_box
all_out : bool
if all of the inputs are outside the bounding_box
"""
all_out = False
outside_index = np.zeros(input_shape, dtype=bool)
for index, _input in enumerate(inputs):
_input = np.asanyarray(_input)
outside = np.broadcast_to(self[index].outside(_input), input_shape)
outside_index[outside] = True
if outside_index.all():
all_out = True
break
return outside_index, all_out
def _valid_index(self, input_shape, inputs):
"""
Get the indices of all the inputs inside the bounding_box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_index : numpy array
array of all indices inside the bounding box
all_out : bool
if all of the inputs are outside the bounding_box
"""
outside_index, all_out = self._outside(input_shape, inputs)
valid_index = np.atleast_1d(np.logical_not(outside_index)).nonzero()
if len(valid_index[0]) == 0:
all_out = True
return valid_index, all_out
def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]:
"""
        Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
valid_index, all_out = self._valid_index(input_shape, inputs)
valid_inputs = []
if not all_out:
for _input in inputs:
if input_shape:
valid_input = np.broadcast_to(np.atleast_1d(_input), input_shape)[
valid_index
]
if np.isscalar(_input):
valid_input = valid_input.item(0)
valid_inputs.append(valid_input)
else:
valid_inputs.append(_input)
return tuple(valid_inputs), valid_index, all_out
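# Illustrative usage sketch for ModelBoundingBox, assuming a two-input model
# such as astropy.modeling.models.Gaussian2D with inputs ('x', 'y'):
#
#     >>> from astropy.modeling.models import Gaussian2D
#     >>> bbox = ModelBoundingBox.validate(Gaussian2D(), ((-1, 1), (-2, 2)), order="F")
#     >>> bbox["x"]                       # -> Interval(lower=-1, upper=1)
#     >>> bbox.bounding_box(order="F")    # -> ((-1, 1), (-2, 2))  (x, y order)
#     >>> bbox.bounding_box(order="C")    # -> ((-2, 2), (-1, 1))  (y, x order)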
_BaseSelectorArgument = namedtuple("_BaseSelectorArgument", "index ignore")
class _SelectorArgument(_BaseSelectorArgument):
"""
Contains a single CompoundBoundingBox slicing input.
Parameters
----------
index : int
The index of the input in the input list
ignore : bool
Whether or not this input will be ignored by the bounding box.
Methods
-------
validate :
Returns a valid SelectorArgument for a given model.
get_selector :
Returns the value of the input for use in finding the correct
bounding_box.
get_fixed_value :
Gets the slicing value from a fix_inputs set of values.
"""
def __new__(cls, index, ignore):
self = super().__new__(cls, index, ignore)
return self
@classmethod
def validate(cls, model, argument, ignored: bool = True):
"""
Construct a valid selector argument for a CompoundBoundingBox.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be an argument for.
argument : int or str
A representation of which evaluation input to use
ignored : optional, bool
Whether or not to ignore this argument in the ModelBoundingBox.
Returns
-------
Validated selector_argument
"""
return cls(get_index(model, argument), ignored)
def get_selector(self, *inputs):
"""
Get the selector value corresponding to this argument
Parameters
----------
*inputs :
All the processed model evaluation inputs.
"""
_selector = inputs[self.index]
if isiterable(_selector):
if len(_selector) == 1:
return _selector[0]
else:
return tuple(_selector)
return _selector
def name(self, model) -> str:
"""
Get the name of the input described by this selector argument
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return get_name(model, self.index)
def pretty_repr(self, model):
"""
Get a pretty-print representation of this object
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return f"Argument(name='{self.name(model)}', ignore={self.ignore})"
def get_fixed_value(self, model, values: dict):
"""
        Get the fixed input value corresponding to this argument
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
values : dict
Dictionary of fixed inputs.
"""
if self.index in values:
return values[self.index]
else:
if self.name(model) in values:
return values[self.name(model)]
else:
raise RuntimeError(
f"{self.pretty_repr(model)} was not found in {values}"
)
def is_argument(self, model, argument) -> bool:
"""
Determine if passed argument is described by this selector argument
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
argument : int or str
A representation of which evaluation input is being used
"""
return self.index == get_index(model, argument)
def named_tuple(self, model):
"""
Get a tuple representation of this argument using the input
name from the model.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return (self.name(model), self.ignore)
class _SelectorArguments(tuple):
"""
Contains the CompoundBoundingBox slicing description
Parameters
----------
input_ :
The SelectorArgument values
Methods
-------
validate :
Returns a valid SelectorArguments for its model.
get_selector :
Returns the selector a set of inputs corresponds to.
is_selector :
Determines if a selector is correctly formatted for this CompoundBoundingBox.
get_fixed_value :
Gets the selector from a fix_inputs set of values.
"""
_kept_ignore = None
def __new__(cls, input_: Tuple[_SelectorArgument], kept_ignore: List = None):
self = super().__new__(cls, input_)
if kept_ignore is None:
self._kept_ignore = []
else:
self._kept_ignore = kept_ignore
return self
def pretty_repr(self, model):
"""
Get a pretty-print representation of this object
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
"""
parts = ["SelectorArguments("]
for argument in self:
parts.append(f" {argument.pretty_repr(model)}")
parts.append(")")
return "\n".join(parts)
@property
def ignore(self):
"""Get the list of ignored inputs"""
ignore = [argument.index for argument in self if argument.ignore]
ignore.extend(self._kept_ignore)
return ignore
@property
def kept_ignore(self):
"""The arguments to persist in ignoring"""
return self._kept_ignore
@classmethod
def validate(cls, model, arguments, kept_ignore: List = None):
"""
Construct a valid Selector description for a CompoundBoundingBox.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
arguments :
The individual argument information
kept_ignore :
Arguments to persist as ignored
"""
inputs = []
for argument in arguments:
_input = _SelectorArgument.validate(model, *argument)
if _input.index in [this.index for this in inputs]:
raise ValueError(
f"Input: '{get_name(model, _input.index)}' has been repeated."
)
inputs.append(_input)
if len(inputs) == 0:
raise ValueError("There must be at least one selector argument.")
if isinstance(arguments, _SelectorArguments):
if kept_ignore is None:
kept_ignore = []
kept_ignore.extend(arguments.kept_ignore)
return cls(tuple(inputs), kept_ignore)
def get_selector(self, *inputs):
"""
Get the selector corresponding to these inputs
Parameters
----------
*inputs :
All the processed model evaluation inputs.
"""
return tuple(argument.get_selector(*inputs) for argument in self)
def is_selector(self, _selector):
"""
Determine if this is a reasonable selector
Parameters
----------
_selector : tuple
The selector to check
"""
return isinstance(_selector, tuple) and len(_selector) == len(self)
def get_fixed_values(self, model, values: dict):
"""
        Get the fixed input values corresponding to these arguments
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
values : dict
Dictionary of fixed inputs.
"""
return tuple(argument.get_fixed_value(model, values) for argument in self)
def is_argument(self, model, argument) -> bool:
"""
Determine if passed argument is one of the selector arguments
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which evaluation input is being used
"""
for selector_arg in self:
if selector_arg.is_argument(model, argument):
return True
else:
return False
def selector_index(self, model, argument):
"""
Get the index of the argument passed in the selector tuples
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
for index, selector_arg in enumerate(self):
if selector_arg.is_argument(model, argument):
return index
else:
raise ValueError(
f"{argument} does not correspond to any selector argument."
)
def reduce(self, model, argument):
"""
Reduce the selector arguments by the argument given
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
arguments = list(self)
kept_ignore = [arguments.pop(self.selector_index(model, argument)).index]
kept_ignore.extend(self._kept_ignore)
return _SelectorArguments.validate(model, tuple(arguments), kept_ignore)
def add_ignore(self, model, argument):
"""
Add argument to the kept_ignore list
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
if self.is_argument(model, argument):
raise ValueError(
f"{argument}: is a selector argument and cannot be ignored."
)
kept_ignore = [get_index(model, argument)]
return _SelectorArguments.validate(model, self, kept_ignore)
def named_tuple(self, model):
"""
Get a tuple of selector argument tuples using input names
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
"""
return tuple(selector_arg.named_tuple(model) for selector_arg in self)
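# Illustrative usage sketch for _SelectorArguments, assuming a two-input model
# with inputs ('x', 'y'):
#
#     >>> from astropy.modeling.models import Gaussian2D
#     >>> args = _SelectorArguments.validate(Gaussian2D(), [("x", True)])
#     >>> args.get_selector(0.5, 7.0)   # -> (0.5,)
#     >>> args.ignore                   # -> [0]  (input 'x' is ignored by the boxes)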
class CompoundBoundingBox(_BoundingDomain):
"""
A model's compound bounding box
Parameters
----------
bounding_boxes : dict
A dictionary containing all the ModelBoundingBoxes that are possible
keys -> _selector (extracted from model inputs)
values -> ModelBoundingBox
model : `~astropy.modeling.Model`
The Model this compound bounding_box is for.
selector_args : _SelectorArguments
A description of how to extract the selectors from model inputs.
create_selector : optional
A method which takes in the selector and the model to return a
valid bounding corresponding to that selector. This can be used
to construct new bounding_boxes for previously undefined selectors.
These new boxes are then stored for future lookups.
order : optional, str
The ordering that is assumed for the tuple representation of the
bounding_boxes.
"""
def __init__(
self,
bounding_boxes: Dict[Any, ModelBoundingBox],
model,
selector_args: _SelectorArguments,
create_selector: Callable = None,
ignored: List[int] = None,
order: str = "C",
):
super().__init__(model, ignored, order)
self._create_selector = create_selector
self._selector_args = _SelectorArguments.validate(model, selector_args)
self._bounding_boxes = {}
self._validate(bounding_boxes)
def copy(self):
bounding_boxes = {
selector: bbox.copy(self.selector_args.ignore)
for selector, bbox in self._bounding_boxes.items()
}
return CompoundBoundingBox(
bounding_boxes,
self._model,
selector_args=self._selector_args,
create_selector=copy.deepcopy(self._create_selector),
order=self._order,
)
def __repr__(self):
parts = ["CompoundBoundingBox(", " bounding_boxes={"]
# bounding_boxes
for _selector, bbox in self._bounding_boxes.items():
bbox_repr = bbox.__repr__().split("\n")
parts.append(f" {_selector} = {bbox_repr.pop(0)}")
for part in bbox_repr:
parts.append(f" {part}")
parts.append(" }")
# selector_args
selector_args_repr = self.selector_args.pretty_repr(self._model).split("\n")
parts.append(f" selector_args = {selector_args_repr.pop(0)}")
for part in selector_args_repr:
parts.append(f" {part}")
parts.append(")")
return "\n".join(parts)
@property
def bounding_boxes(self) -> Dict[Any, ModelBoundingBox]:
return self._bounding_boxes
@property
def selector_args(self) -> _SelectorArguments:
return self._selector_args
@selector_args.setter
def selector_args(self, value):
self._selector_args = _SelectorArguments.validate(self._model, value)
warnings.warn(
"Overriding selector_args may cause problems you should re-validate "
"the compound bounding box before use!",
RuntimeWarning,
)
@property
def named_selector_tuple(self) -> tuple:
return self._selector_args.named_tuple(self._model)
@property
def create_selector(self):
return self._create_selector
@staticmethod
def _get_selector_key(key):
if isiterable(key):
return tuple(key)
else:
return (key,)
def __setitem__(self, key, value):
_selector = self._get_selector_key(key)
if not self.selector_args.is_selector(_selector):
raise ValueError(f"{_selector} is not a selector!")
ignored = self.selector_args.ignore + self.ignored
self._bounding_boxes[_selector] = ModelBoundingBox.validate(
self._model, value, ignored, order=self._order
)
def _validate(self, bounding_boxes: dict):
for _selector, bounding_box in bounding_boxes.items():
self[_selector] = bounding_box
def __eq__(self, value):
if isinstance(value, CompoundBoundingBox):
return (
self.bounding_boxes == value.bounding_boxes
and self.selector_args == value.selector_args
and self.create_selector == value.create_selector
)
else:
return False
@classmethod
def validate(
cls,
model,
bounding_box: dict,
selector_args=None,
create_selector=None,
ignored: list = None,
order: str = "C",
_preserve_ignore: bool = False,
**kwarg,
):
"""
Construct a valid compound bounding box for a model.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be a bounding_box
bounding_box : dict
Dictionary of possible bounding_box representations
selector_args : optional
Description of the selector arguments
create_selector : optional, callable
Method for generating new selectors
order : optional, str
The order that a tuple representation will be assumed to be
Default: 'C'
"""
if isinstance(bounding_box, CompoundBoundingBox):
if selector_args is None:
selector_args = bounding_box.selector_args
if create_selector is None:
create_selector = bounding_box.create_selector
order = bounding_box.order
if _preserve_ignore:
ignored = bounding_box.ignored
bounding_box = bounding_box.bounding_boxes
if selector_args is None:
raise ValueError(
"Selector arguments must be provided "
"(can be passed as part of bounding_box argument)"
)
return cls(
bounding_box,
model,
selector_args,
create_selector=create_selector,
ignored=ignored,
order=order,
)
def __contains__(self, key):
return key in self._bounding_boxes
def _create_bounding_box(self, _selector):
self[_selector] = self._create_selector(_selector, model=self._model)
return self[_selector]
def __getitem__(self, key):
_selector = self._get_selector_key(key)
if _selector in self:
return self._bounding_boxes[_selector]
elif self._create_selector is not None:
return self._create_bounding_box(_selector)
else:
raise RuntimeError(f"No bounding box is defined for selector: {_selector}.")
def _select_bounding_box(self, inputs) -> ModelBoundingBox:
_selector = self.selector_args.get_selector(*inputs)
return self[_selector]
def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]:
"""
        Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
bounding_box = self._select_bounding_box(inputs)
return bounding_box.prepare_inputs(input_shape, inputs)
def _matching_bounding_boxes(self, argument, value) -> Dict[Any, ModelBoundingBox]:
selector_index = self.selector_args.selector_index(self._model, argument)
matching = {}
for selector_key, bbox in self._bounding_boxes.items():
if selector_key[selector_index] == value:
new_selector_key = list(selector_key)
new_selector_key.pop(selector_index)
if bbox.has_interval(argument):
new_bbox = bbox.fix_inputs(
self._model, {argument: value}, _keep_ignored=True
)
else:
new_bbox = bbox.copy()
matching[tuple(new_selector_key)] = new_bbox
if len(matching) == 0:
raise ValueError(
f"Attempting to fix input {argument}, but there are no "
f"bounding boxes for argument value {value}."
)
return matching
def _fix_input_selector_arg(self, argument, value):
matching_bounding_boxes = self._matching_bounding_boxes(argument, value)
if len(self.selector_args) == 1:
return matching_bounding_boxes[()]
else:
return CompoundBoundingBox(
matching_bounding_boxes,
self._model,
self.selector_args.reduce(self._model, argument),
)
def _fix_input_bbox_arg(self, argument, value):
bounding_boxes = {}
for selector_key, bbox in self._bounding_boxes.items():
bounding_boxes[selector_key] = bbox.fix_inputs(
self._model, {argument: value}, _keep_ignored=True
)
return CompoundBoundingBox(
bounding_boxes,
self._model,
self.selector_args.add_ignore(self._model, argument),
)
def fix_inputs(self, model, fixed_inputs: dict):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
"""
fixed_input_keys = list(fixed_inputs.keys())
argument = fixed_input_keys.pop()
value = fixed_inputs[argument]
if self.selector_args.is_argument(self._model, argument):
bbox = self._fix_input_selector_arg(argument, value)
else:
bbox = self._fix_input_bbox_arg(argument, value)
if len(fixed_input_keys) > 0:
new_fixed_inputs = fixed_inputs.copy()
del new_fixed_inputs[argument]
bbox = bbox.fix_inputs(model, new_fixed_inputs)
if isinstance(bbox, CompoundBoundingBox):
selector_args = bbox.named_selector_tuple
bbox_dict = bbox
elif isinstance(bbox, ModelBoundingBox):
selector_args = None
bbox_dict = bbox.named_intervals
return bbox.__class__.validate(
model, bbox_dict, order=bbox.order, selector_args=selector_args
)
|
2a425a6f627ceda20897b0e61c5e66ee2f4057034fe773e788049382654fe0f1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
Implements projections--particularly sky projections defined in WCS Paper II
[1]_.
All angles are set and displayed in degrees, but internally computations are
performed in radians. All functions expect inputs and produce outputs in degrees.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import abc
from itertools import chain, product
import numpy as np
from astropy import units as u
from astropy import wcs
from .core import Model
from .parameters import InputParameterError, Parameter
from .utils import _to_orig_unit, _to_radian
# List of tuples of the form
# (long class name without suffix, short WCSLIB projection code):
_PROJ_NAME_CODE = [
("ZenithalPerspective", "AZP"),
("SlantZenithalPerspective", "SZP"),
("Gnomonic", "TAN"),
("Stereographic", "STG"),
("SlantOrthographic", "SIN"),
("ZenithalEquidistant", "ARC"),
("ZenithalEqualArea", "ZEA"),
("Airy", "AIR"),
("CylindricalPerspective", "CYP"),
("CylindricalEqualArea", "CEA"),
("PlateCarree", "CAR"),
("Mercator", "MER"),
("SansonFlamsteed", "SFL"),
("Parabolic", "PAR"),
("Molleweide", "MOL"),
("HammerAitoff", "AIT"),
("ConicPerspective", "COP"),
("ConicEqualArea", "COE"),
("ConicEquidistant", "COD"),
("ConicOrthomorphic", "COO"),
("BonneEqualArea", "BON"),
("Polyconic", "PCO"),
("TangentialSphericalCube", "TSC"),
("COBEQuadSphericalCube", "CSC"),
("QuadSphericalCube", "QSC"),
("HEALPix", "HPX"),
("HEALPixPolar", "XPH"),
]
_NOT_SUPPORTED_PROJ_CODES = ["ZPN"]
_PROJ_NAME_CODE_MAP = dict(_PROJ_NAME_CODE)
projcodes = [code for _, code in _PROJ_NAME_CODE]
__all__ = [
"Projection",
"Pix2SkyProjection",
"Sky2PixProjection",
"Zenithal",
"Cylindrical",
"PseudoCylindrical",
"Conic",
"PseudoConic",
"QuadCube",
"HEALPix",
"AffineTransformation2D",
"projcodes",
] + list(map("_".join, product(["Pix2Sky", "Sky2Pix"], chain(*_PROJ_NAME_CODE))))
class _ParameterDS(Parameter):
"""
Same as `Parameter` but can indicate its modified status via the ``dirty``
property. This flag also gets set automatically when a parameter is
modified.
    This ability to track a parameter's modified status is needed so that the
    automatic update of WCSLIB's prjprm structure (which may be a more
    time-intensive operation) happens *only as required*.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dirty = True
def validate(self, value):
super().validate(value)
self.dirty = True
class Projection(Model):
"""Base class for all sky projections."""
# Radius of the generating sphere.
# This sets the circumference to 360 deg so that arc length is measured in deg.
r0 = 180 * u.deg / np.pi
_separable = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj = wcs.Prjprm()
@property
@abc.abstractmethod
def inverse(self):
"""
Inverse projection--all projection models must provide an inverse.
"""
@property
def prjprm(self):
"""WCSLIB ``prjprm`` structure."""
self._update_prj()
return self._prj
def _update_prj(self):
"""
A default updater for projection's pv.
.. warning::
This method assumes that PV0 is never modified. If a projection
that uses PV0 is ever implemented in this module, that projection
class should override this method.
.. warning::
This method assumes that the order in which PVi values (i>0)
are to be assigned is identical to the order of model parameters
in ``param_names``. That is, pv[1] = model.parameters[0], ...
"""
if not self.param_names:
return
pv = []
dirty = False
for p in self.param_names:
param = getattr(self, p)
pv.append(float(param.value))
dirty |= param.dirty
param.dirty = False
if dirty:
self._prj.pv = None, *pv
self._prj.set()
class Pix2SkyProjection(Projection):
"""Base class for all Pix2Sky projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split("_")[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# with no parameters:
self._prj.set()
self.inputs = ("x", "y")
self.outputs = ("phi", "theta")
@property
def input_units(self):
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def evaluate(self, x, y, *args, **kwargs):
self._update_prj()
return self._prj.prjx2s(x, y)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
class Sky2PixProjection(Projection):
"""Base class for all Sky2Pix projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split("_")[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# without parameters:
self._prj.set()
self.inputs = ("phi", "theta")
self.outputs = ("x", "y")
@property
def input_units(self):
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def evaluate(self, phi, theta, *args, **kwargs):
self._update_prj()
return self._prj.prjs2x(phi, theta)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
class Zenithal(Projection):
r"""Base class for all Zenithal projections.
Zenithal (or azimuthal) projections map the sphere directly onto a
plane. All zenithal projections are specified by defining the
radius as a function of native latitude, :math:`R_\theta`.
The pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg(-y, x) \\
R_\theta &= \sqrt{x^2 + y^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin \phi \\
y &= R_\theta \cos \phi
"""
class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Zenithal perspective projection - pixel to sky.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
\phi &= \arg(-y \cos \gamma, x) \\
\theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right.
where:
.. math::
\psi &= \arg(\rho, 1) \\
\omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\
\rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\
R &= \sqrt{x^2 + y^2 \cos^2 \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Look angle γ in degrees (Default = 0°)",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
x &= R \sin \phi \\
y &= -R \sec \gamma \cos \theta
where:
.. math::
R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta}
{(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Look angle γ in degrees (Default=0°)",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Slant zenithal perspective projection - pixel to sky.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees (Default=0°)",
)
theta0 = _ParameterDS(
default=90.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees (Default=0°)",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees",
)
theta0 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal):
r"""
Gnomonic projection - pixel to sky.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
"""
class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
r"""
Gnomonic Projection - sky to pixel.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
"""
class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
r"""
Stereographic Projection - pixel to sky.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
"""
class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
r"""
Stereographic Projection - sky to pixel.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
"""
class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
r"""
Slant orthographic projection - pixel to sky.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
\theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)
The parameters :math:`\xi` and :math:`\eta` are defined from the
reference point :math:`(\phi_c, \theta_c)` as:
.. math::
\xi &= \cot \theta_c \sin \phi_c \\
\eta &= - \cot \theta_c \cos \phi_c
Parameters
----------
xi : float
Obliqueness parameter, ξ. Default is 0.0.
eta : float
Obliqueness parameter, η. Default is 0.0.
"""
xi = _ParameterDS(default=0.0, description="Obliqueness parameter")
eta = _ParameterDS(default=0.0, description="Obliqueness parameter")
class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
r"""
Slant orthographic projection - sky to pixel.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cos \theta
    More specifically, the full transformations are:
.. math::
x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
"""
xi = _ParameterDS(default=0.0)
eta = _ParameterDS(default=0.0)
class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
r"""
Zenithal equidistant projection - pixel to sky.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - R_\theta
"""
class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
r"""
Zenithal equidistant projection - sky to pixel.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = 90^\circ - \theta
"""
class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
r"""
    Zenithal equal area projection - pixel to sky.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
"""
class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
r"""
    Zenithal equal area projection - sky to pixel.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
&= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
"""
class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
r"""
Airy projection - pixel to sky.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(default=90.0)
class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
r"""
    Airy projection - sky to pixel.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} +
\frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)
where:
.. math::
\xi &= \frac{90^\circ - \theta}{2} \\
\xi_b &= \frac{90^\circ - \theta_b}{2}
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(
default=90.0,
description="The latitude at which to minimize the error,in degrees",
)
class Cylindrical(Projection):
r"""Base class for Cylindrical projections.
Cylindrical projections are so-named because the surface of
projection is a cylinder.
"""
_separable = True
class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical perspective - pixel to sky.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\lambda} \\
        \theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)
where:
.. math::
\eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(default=1.0)
lam = _ParameterDS(default=1.0)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError("CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError("CYP projection is not defined for lambda = -mu")
class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
r"""
Cylindrical Perspective - sky to pixel.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
x &= \lambda \phi \\
y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.
lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(
default=1.0, description="Distance from center of sphere in spherical radii"
)
lam = _ParameterDS(
default=1.0, description="Radius of the cylinder in spherical radii"
)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError("CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError("CYP projection is not defined for lambda = -mu")
class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical equal area projection - pixel to sky.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)
Parameters
----------
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical):
r"""
Cylindrical equal area projection - sky to pixel.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda}
Parameters
----------
lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical):
r"""
Plate carrée projection - pixel to sky.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= y
"""
@staticmethod
def evaluate(x, y):
# The intermediate variables are only used here for clarity
phi = np.array(x)
theta = np.array(y)
return phi, theta
class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical):
r"""
Plate carrée projection - sky to pixel.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \theta
"""
@staticmethod
def evaluate(phi, theta):
# The intermediate variables are only used here for clarity
x = np.array(phi)
y = np.array(theta)
return x, y
class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical):
r"""
Mercator - pixel to sky.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ}
"""
class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical):
r"""
Mercator - sky to pixel.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right)
"""
class PseudoCylindrical(Projection):
r"""Base class for pseudocylindrical projections.
Pseudocylindrical projections are like cylindrical projections
except the parallels of latitude are projected at diminishing
lengths toward the polar regions in order to reduce lateral
distortion there. Consequently, the meridians are curved.
"""
_separable = True
class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - pixel to sky.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\cos y} \\
\theta &= y
"""
class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - sky to pixel.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
x &= \phi \cos \theta \\
y &= \theta
"""
class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical):
r"""
Parabolic projection - pixel to sky.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
\phi &= \frac{180^\circ}{\pi} \frac{x}{1 - 4(y / 180^\circ)^2} \\
\theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right)
"""
class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical):
r"""
Parabolic projection - sky to pixel.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\
y &= 180^\circ \sin \frac{\theta}{3}
"""
class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical):
r"""
Molleweide's projection - pixel to sky.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\
\theta &= \sin^{-1}\left(
\frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right)
+ \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}
\right)
"""
class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical):
r"""
Molleweide's projection - sky to pixel.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\
y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma
where :math:`\gamma` is defined as the solution of the
transcendental equation:
.. math::
\sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi}
"""
class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - pixel to sky.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
\phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right)
"""
class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - sky to pixel.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\
y &= \gamma \sin \theta
where:
.. math::
\gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}}
"""
class Conic(Projection):
r"""Base class for conic projections.
In conic projections, the sphere is thought to be projected onto
the surface of a cone which is then opened out.
In a general sense, the pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\
R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin (C \phi) \\
y &= R_\theta \cos (C \phi) + Y_0
where :math:`C` is the "constant of the cone":
.. math::
C = \frac{180^\circ \cos \theta}{\pi R_\theta}
"""
sigma = _ParameterDS(default=90.0, getter=_to_orig_unit, setter=_to_radian)
delta = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic):
r"""
Colles' conic perspective projection - pixel to sky.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic):
r"""
Colles' conic perspective projection - sky to pixel.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic):
r"""
Albers' conic equal area projection - pixel to sky.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic):
r"""
Albers' conic equal area projection - sky to pixel.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic):
r"""
Conic equidistant projection - pixel to sky.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic):
r"""
Conic equidistant projection - sky to pixel.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic):
r"""
Conic orthomorphic projection - pixel to sky.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic):
r"""
Conic orthomorphic projection - sky to pixel.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class PseudoConic(Projection):
r"""Base class for pseudoconic projections.
Pseudoconics are a subclass of conics with concentric parallels.
"""
class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - pixel to sky.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\
\theta &= Y_0 - R_\theta
where:
.. math::
R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\
A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right)
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
_separable = True
theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - sky to pixel.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
x &= R_\theta \sin A_\phi \\
y &= -R_\theta \cos A_\phi + Y_0
where:
.. math::
A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\
R_\theta &= Y_0 - \theta \\
Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
_separable = True
theta1 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Bonne conformal latitude, in degrees",
)
class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic):
r"""
Polyconic projection - pixel to sky.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic):
r"""
Polyconic projection - sky to pixel.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
class QuadCube(Projection):
r"""Base class for quad cube projections.
Quadrilateralized spherical cube (quad-cube) projections belong to
the class of polyhedral projections in which the sphere is
projected onto the surface of an enclosing polyhedron.
The six faces of the quad-cube projections are numbered and laid
out as::
0
4 3 2 1 4 3 2
5
"""
class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Tangential spherical cube projection - pixel to sky.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube):
r"""
Tangential spherical cube projection - sky to pixel.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
class HEALPix(Projection):
r"""Base class for HEALPix projections."""
class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix):
r"""
HEALPix - pixel to sky.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = _ParameterDS(
default=4.0, description="The number of facets in longitude direction."
)
X = _ParameterDS(
default=3.0, description="The number of facets in latitude direction."
)
class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix):
r"""
HEALPix projection - sky to pixel.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = _ParameterDS(
default=4.0, description="The number of facets in longitude direction."
)
X = _ParameterDS(
default=3.0, description="The number of facets in latitude direction."
)
class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
class AffineTransformation2D(Model):
"""
Perform an affine transformation in 2 dimensions.
Parameters
----------
matrix : array
A 2x2 matrix specifying the linear transformation to apply to the
inputs
translation : array
A 2D vector (given as either a 2x1 or 1x2 array) specifying a
translation to apply to the inputs
"""
n_inputs = 2
n_outputs = 2
standard_broadcasting = False
_separable = False
matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]])
translation = Parameter(default=[0.0, 0.0])
@matrix.validator
def matrix(self, value):
"""Validates that the input matrix is a 2x2 2D array."""
if np.shape(value) != (2, 2):
raise InputParameterError(
"Expected transformation matrix to be a 2x2 array"
)
@translation.validator
def translation(self, value):
"""
Validates that the translation vector is a 2D vector. This allows
either a "row" vector or a "column" vector where in the latter case the
resultant Numpy array has ``ndim=2`` but the shape is ``(1, 2)``.
"""
if not (
(np.ndim(value) == 1 and np.shape(value) == (2,))
or (np.ndim(value) == 2 and np.shape(value) == (1, 2))
):
raise InputParameterError(
"Expected translation vector to be a 2 element row or column "
"vector array"
)
def __init__(self, matrix=matrix, translation=translation, **kwargs):
super().__init__(matrix=matrix, translation=translation, **kwargs)
self.inputs = ("x", "y")
self.outputs = ("x", "y")
@property
def inverse(self):
"""
Inverse transformation.
Raises `~astropy.modeling.InputParameterError` if the transformation cannot be inverted.
"""
det = np.linalg.det(self.matrix.value)
if det == 0:
raise InputParameterError(
f"Transformation matrix is singular; {self.__class__.__name__} model"
" does not have an inverse"
)
matrix = np.linalg.inv(self.matrix.value)
if self.matrix.unit is not None:
matrix = matrix * self.matrix.unit
# If matrix has unit then translation has unit, so no need to assign it.
translation = -np.dot(matrix, self.translation.value)
return self.__class__(matrix=matrix, translation=translation)
@classmethod
def evaluate(cls, x, y, matrix, translation):
"""
Apply the transformation to a set of 2D Cartesian coordinates given as
two lists--one for the x coordinates and one for the y coordinates--or a
single coordinate pair.
Parameters
----------
x, y : array, float
x and y coordinates
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
shape = x.shape or (1,)
# Use asarray to ensure the units are dropped.
inarr = np.vstack(
[np.asarray(x).ravel(), np.asarray(y).ravel(), np.ones(x.size, x.dtype)]
)
if inarr.shape[0] != 3 or inarr.ndim != 2:
raise ValueError("Incompatible input shapes")
augmented_matrix = cls._create_augmented_matrix(matrix, translation)
result = np.dot(augmented_matrix, inarr)
x, y = result[0], result[1]
x.shape = y.shape = shape
return x, y
@staticmethod
def _create_augmented_matrix(matrix, translation):
unit = None
if any([hasattr(translation, "unit"), hasattr(matrix, "unit")]):
if not all([hasattr(translation, "unit"), hasattr(matrix, "unit")]):
raise ValueError(
"To use AffineTransformation with quantities, "
"both matrix and unit need to be quantities."
)
unit = translation.unit
# matrix should have the same units as translation
if not (matrix.unit / translation.unit) == u.dimensionless_unscaled:
raise ValueError("matrix and translation must have the same units.")
augmented_matrix = np.empty((3, 3), dtype=float)
augmented_matrix[0:2, 0:2] = matrix
augmented_matrix[0:2, 2:].flat = translation
augmented_matrix[2] = [0, 0, 1]
if unit is not None:
return augmented_matrix * unit
return augmented_matrix
@property
def input_units(self):
if self.translation.unit is None and self.matrix.unit is None:
return None
elif self.translation.unit is not None:
return dict(zip(self.inputs, [self.translation.unit] * 2))
else:
return dict(zip(self.inputs, [self.matrix.unit] * 2))
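# Illustrative usage sketch (comments only, not executed; demo values): the
# model applies ``matrix @ [x, y] + translation`` via the augmented matrix
# built in ``_create_augmented_matrix``.
#
#     >>> aff = AffineTransformation2D(matrix=[[2.0, 0.0], [0.0, 2.0]],
#     ...                              translation=[1.0, -1.0])
#     >>> aff(1.0, 1.0)           # (2*1 + 1, 2*1 - 1) -> (3.0, 1.0)
#     >>> aff.inverse(3.0, 1.0)   # recovers (1.0, 1.0)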
for long_name, short_name in _PROJ_NAME_CODE:
# define short-name projection equivalent classes:
globals()["Pix2Sky_" + short_name] = globals()["Pix2Sky_" + long_name]
globals()["Sky2Pix_" + short_name] = globals()["Sky2Pix_" + long_name]
# set inverse classes:
globals()["Pix2Sky_" + long_name]._inv_cls = globals()["Sky2Pix_" + long_name]
globals()["Sky2Pix_" + long_name]._inv_cls = globals()["Pix2Sky_" + long_name]
c3760ff93f5cd7b52b8b11d867225f7cfb251df6d74ad7b443a67955b1029b00
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for accessing, downloading, and caching data files."""
import atexit
import contextlib
import errno
import fnmatch
import ftplib
import functools
import hashlib
import io
import os
import re
import shutil
# import ssl moved inside functions using ssl to avoid import failure
# when running in pyodide/Emscripten
import sys
import urllib.error
import urllib.parse
import urllib.request
import zipfile
from tempfile import NamedTemporaryFile, TemporaryDirectory, gettempdir, mkdtemp
from warnings import warn
try:
import certifi
except ImportError:
# certifi support is optional; when available it will be used for TLS/SSL
# downloads
certifi = None
import astropy.config.paths
from astropy import config as _config
from astropy.utils.compat.optional_deps import HAS_FSSPEC
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.introspection import find_current_module, resolve_name
# Order here determines order in the autosummary
__all__ = [
"Conf",
"conf",
"download_file",
"download_files_in_parallel",
"get_readable_fileobj",
"get_pkg_data_fileobj",
"get_pkg_data_filename",
"get_pkg_data_contents",
"get_pkg_data_fileobjs",
"get_pkg_data_filenames",
"get_pkg_data_path",
"is_url",
"is_url_in_cache",
"get_cached_urls",
"cache_total_size",
"cache_contents",
"export_download_cache",
"import_download_cache",
"import_file_to_cache",
"check_download_cache",
"clear_download_cache",
"compute_hash",
"get_free_space_in_dir",
"check_free_space_in_dir",
"get_file_contents",
"CacheMissingWarning",
"CacheDamaged",
]
_dataurls_to_alias = {}
class _NonClosingBufferedReader(io.BufferedReader):
def __del__(self):
try:
# NOTE: self.raw will not be closed, but left in the state
# it was in at detachment
self.detach()
except Exception:
pass
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __del__(self):
try:
# NOTE: self.stream will not be closed, but left in the state
# it was in at detachment
self.detach()
except Exception:
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.data`.
"""
dataurl = _config.ConfigItem(
"http://data.astropy.org/", "Primary URL for astropy remote data site."
)
dataurl_mirror = _config.ConfigItem(
"http://www.astropy.org/astropy-data/",
"Mirror URL for astropy remote data site.",
)
default_http_user_agent = _config.ConfigItem(
"astropy",
"Default User-Agent for HTTP request headers. This can be overwritten "
"for a particular call via http_headers option, where available. "
"This only provides the default value when not set by https_headers.",
)
remote_timeout = _config.ConfigItem(
10.0,
"Time to wait for remote data queries (in seconds).",
aliases=["astropy.coordinates.name_resolve.name_resolve_timeout"],
)
allow_internet = _config.ConfigItem(
True, "If False, prevents any attempt to download from Internet."
)
compute_hash_block_size = _config.ConfigItem(
2**16, "Block size for computing file hashes." # 64K
)
download_block_size = _config.ConfigItem(
2**16, "Number of bytes of remote data to download per step." # 64K
)
delete_temporary_downloads_at_exit = _config.ConfigItem(
True,
"If True, temporary download files created when the cache is "
"inaccessible will be deleted at the end of the python session.",
)
conf = Conf()
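# Illustrative sketch (comments only, not executed): the items above are plain
# config attributes on ``conf``; ``set_temp`` is assumed to come from the
# ConfigNamespace base class (not defined in this module).
#
#     >>> conf.remote_timeout                     # 10.0 seconds by default
#     >>> with conf.set_temp("allow_internet", False):
#     ...     pass  # any download attempted here raises URLError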
class CacheMissingWarning(AstropyWarning):
"""
This warning indicates the standard cache directory is not accessible, with
the first argument providing the warning message. If args[1] is present, it
is a filename indicating the path to a temporary file that was created to
store a remote data download in the absence of the cache.
"""
def is_url(string):
"""
Test whether a string is a valid URL for :func:`download_file`.
Parameters
----------
string : str
The string to test.
Returns
-------
status : bool
String is URL or not.
"""
url = urllib.parse.urlparse(string)
# we can't just check that url.scheme is not an empty string, because
# file paths in windows would return a non-empty scheme (e.g. e:\\
# returns 'e').
return url.scheme.lower() in ["http", "https", "ftp", "sftp", "ssh", "file"]
# Backward compatibility because some downstream packages allegedly use it.
_is_url = is_url
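# Illustrative sketch (comments only, not executed) of the scheme check above:
#
#     >>> is_url("https://data.astropy.org/allsky/allsky_rosat.fits")
#     True
#     >>> is_url(r"e:\data\local_file.fits")   # drive letter parses as scheme 'e'
#     False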
def _requires_fsspec(url):
"""Does the `url` require the optional ``fsspec`` dependency to open?"""
return isinstance(url, str) and url.startswith(("s3://", "gs://"))
def _is_inside(path, parent_path):
# We have to try realpath too to avoid issues with symlinks, but we leave
# abspath because some systems like debian have the absolute path (with no
# symlinks followed) match, but the real directories in different
# locations, so need to try both cases.
return os.path.abspath(path).startswith(
os.path.abspath(parent_path)
) or os.path.realpath(path).startswith(os.path.realpath(parent_path))
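# Illustrative sketch (comments only, not executed; hypothetical POSIX paths):
#
#     >>> _is_inside("/opt/pkg/data/file.dat", "/opt/pkg")
#     True
#     >>> _is_inside("/tmp/file.dat", "/opt/pkg")
#     False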
@contextlib.contextmanager
def get_readable_fileobj(
name_or_obj,
encoding=None,
cache=False,
show_progress=True,
remote_timeout=None,
sources=None,
http_headers=None,
*,
use_fsspec=None,
fsspec_kwargs=None,
close_files=True,
):
"""Yield a readable, seekable file-like object from a file or URL.
This supports passing filenames, URLs, and readable file-like objects,
any of which can be compressed in gzip, bzip2 or lzma (xz) if the
appropriate compression libraries are provided by the Python installation.
Notes
-----
This function is a context manager, and should be used for example
as::
with get_readable_fileobj('file.dat') as f:
contents = f.read()
If a URL is provided and the cache is in use, the provided URL will be the
name used in the cache. The contents may already be stored in the cache
under the URL provided, they may be downloaded from this URL, or they may
be downloaded from one of the locations listed in ``sources``. See
`~download_file` for details.
Parameters
----------
name_or_obj : str or file-like
The filename of the file to access (if given as a string), or
the file-like object to access.
If a file-like object, it must be opened in binary mode.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
check the remote URL for a new version but store the result
in the cache.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
use_fsspec : bool, optional
Use `fsspec.open` to open the file? Defaults to `False` unless
``name_or_obj`` starts with the Amazon S3 storage prefix ``s3://``
or the Google Cloud Storage prefix ``gs://``. Can also be used for paths
with other prefixes (e.g. ``http://``) but in this case you must
explicitly pass ``use_fsspec=True``.
Use of this feature requires the optional ``fsspec`` package.
A ``ModuleNotFoundError`` will be raised if the dependency is missing.
.. versionadded:: 5.2
fsspec_kwargs : dict, optional
Keyword arguments passed on to `fsspec.open`. This can be used to
configure cloud storage credentials and caching behavior.
For example, pass ``fsspec_kwargs={"anon": True}`` to enable
anonymous access to Amazon S3 open data buckets.
See ``fsspec``'s documentation for available parameters.
.. versionadded:: 5.2
close_files : bool, optional
Close the file object when exiting the context manager.
Default is `True`.
.. versionadded:: 5.2
Returns
-------
file : readable file-like
"""
# close_fds is a list of file handles created by this function
# that need to be closed. We don't want to always just close the
# returned file handle, because it may simply be the file handle
# passed in. In that case it is not the responsibility of this
# function to close it: doing so could result in a "double close"
# and an "invalid file descriptor" exception.
close_fds = []
delete_fds = []
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
# Have `use_fsspec` default to ``True`` if the user passed an Amazon S3
# or Google Cloud Storage URI.
if use_fsspec is None and _requires_fsspec(name_or_obj):
use_fsspec = True
if use_fsspec:
if not isinstance(name_or_obj, str):
raise TypeError("`name_or_obj` must be a string when `use_fsspec=True`")
if fsspec_kwargs is None:
fsspec_kwargs = {}
# name_or_obj could be an os.PathLike object
if isinstance(name_or_obj, os.PathLike):
name_or_obj = os.fspath(name_or_obj)
# Get a file object to the content
if isinstance(name_or_obj, str):
# Use fsspec to open certain cloud-hosted files (e.g., AWS S3, Google Cloud Storage)
if use_fsspec:
if not HAS_FSSPEC:
raise ModuleNotFoundError("please install `fsspec` to open this file")
import fsspec # local import because it is a niche dependency
openfileobj = fsspec.open(name_or_obj, **fsspec_kwargs)
close_fds.append(openfileobj)
fileobj = openfileobj.open()
close_fds.append(fileobj)
else:
is_url = _is_url(name_or_obj)
if is_url:
name_or_obj = download_file(
name_or_obj,
cache=cache,
show_progress=show_progress,
timeout=remote_timeout,
sources=sources,
http_headers=http_headers,
)
fileobj = io.FileIO(name_or_obj, "r")
if is_url and not cache:
delete_fds.append(fileobj)
close_fds.append(fileobj)
else:
fileobj = name_or_obj
# Check if the file object supports random access, and if not,
# then wrap it in a BytesIO buffer. It would be nicer to use a
# BufferedReader to avoid loading the whole file first,
# but that might not be compatible with all possible I/O classes.
if not hasattr(fileobj, "seek"):
try:
# py.path.LocalPath objects have .read() method but it uses
# text mode, which won't work. .read_binary() does, and
# surely other ducks would return binary contents when
# called like this.
# py.path.LocalPath is what comes from the legacy tmpdir fixture
# in pytest.
fileobj = io.BytesIO(fileobj.read_binary())
except AttributeError:
fileobj = io.BytesIO(fileobj.read())
# Now read enough bytes to look at signature
signature = fileobj.read(4)
fileobj.seek(0)
if signature[:3] == b"\x1f\x8b\x08": # gzip
import struct
try:
import gzip
fileobj_new = gzip.GzipFile(fileobj=fileobj, mode="rb")
fileobj_new.read(1) # need to check that the file is really gzip
except (OSError, EOFError, struct.error): # invalid gzip file
fileobj.seek(0)
fileobj_new.close()
else:
fileobj_new.seek(0)
fileobj = fileobj_new
elif signature[:3] == b"BZh": # bzip2
try:
import bz2
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
try:
# bz2.BZ2File does not support file objects, only filenames, so we
# need to write the data to a temporary file
with NamedTemporaryFile("wb", delete=False) as tmp:
tmp.write(fileobj.read())
tmp.close()
fileobj_new = bz2.BZ2File(tmp.name, mode="rb")
fileobj_new.read(1) # need to check that the file is really bzip2
except OSError: # invalid bzip2 file
fileobj.seek(0)
fileobj_new.close()
# raise
else:
fileobj_new.seek(0)
close_fds.append(fileobj_new)
fileobj = fileobj_new
elif signature[:3] == b"\xfd7z": # xz
try:
import lzma
fileobj_new = lzma.LZMAFile(fileobj, mode="rb")
fileobj_new.read(1) # need to check that the file is really xz
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the lzma module."
)
except (OSError, EOFError): # invalid xz file
fileobj.seek(0)
fileobj_new.close()
# should we propagate this to the caller to signal bad content?
# raise ValueError(e)
else:
fileobj_new.seek(0)
fileobj = fileobj_new
# By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File
# or lzma.LZMAFile instance opened in binary mode (that is, read
# returns bytes). Now we need to, if requested, wrap it in a
# io.TextIOWrapper so read will return unicode based on the
# encoding parameter.
needs_textio_wrapper = encoding != "binary"
if needs_textio_wrapper:
# A bz2.BZ2File can not be wrapped by a TextIOWrapper,
# so we decompress it to a temporary file and then
# return a handle to that.
try:
import bz2
except ImportError:
pass
else:
if isinstance(fileobj, bz2.BZ2File):
tmp = NamedTemporaryFile("wb", delete=False)
data = fileobj.read()
tmp.write(data)
tmp.close()
delete_fds.append(tmp)
fileobj = io.FileIO(tmp.name, "r")
close_fds.append(fileobj)
fileobj = _NonClosingBufferedReader(fileobj)
fileobj = _NonClosingTextIOWrapper(fileobj, encoding=encoding)
# Ensure that file is at the start - io.FileIO will for
# example not always be at the start:
# >>> import io
# >>> f = open('test.fits', 'rb')
# >>> f.read(4)
# 'SIMP'
# >>> f.seek(0)
# >>> fileobj = io.FileIO(f.fileno())
# >>> fileobj.tell()
# 4096L
fileobj.seek(0)
try:
yield fileobj
finally:
if close_files:
for fd in close_fds:
fd.close()
for fd in delete_fds:
os.remove(fd.name)
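# Illustrative sketch (comments only, not executed; ``catalog.csv.gz`` is a
# hypothetical local file): the signature sniffing above decompresses gzip,
# bzip2 and xz transparently, so callers simply read decoded text.
#
#     >>> with get_readable_fileobj("catalog.csv.gz") as f:   # doctest: +SKIP
#     ...     header = f.readline()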
def get_file_contents(*args, **kwargs):
"""
Retrieves the contents of a filename or file-like object.
See the `get_readable_fileobj` docstring for details on parameters.
Returns
-------
object
The content of the file (as requested by ``encoding``).
"""
with get_readable_fileobj(*args, **kwargs) as f:
return f.read()
@contextlib.contextmanager
def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations for the package and
provides the file as a file-like object that reads bytes.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
fileobj : file-like
An object with the contents of the data file available via
``read`` function. Can be used as part of a ``with`` statement,
automatically closing itself after the ``with`` block.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Examples
--------
This will retrieve a data file and its contents for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('data/3d_cd.hdr',
... package='astropy.wcs.tests') as fobj:
... fcontents = fobj.read()
...
This next example would download a data file from the astropy data server
because the ``allsky/allsky_rosat.fits`` file is not present in the
source distribution. It will also save the file locally so the
next time it is accessed it won't need to be downloaded.::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
This does the same thing but does *not* cache it locally::
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_filename : returns a local name for a file containing the data
"""
datafn = get_pkg_data_path(data_name, package=package)
if os.path.isdir(datafn):
raise OSError(
"Tried to access a data file that's actually a package data directory"
)
elif os.path.isfile(datafn): # local file
with get_readable_fileobj(datafn, encoding=encoding) as fileobj:
yield fileobj
else: # remote file
with get_readable_fileobj(
conf.dataurl + data_name,
encoding=encoding,
cache=cache,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
) as fileobj:
# We read a byte to trigger any URLErrors
fileobj.read(1)
fileobj.seek(0)
yield fileobj
def get_pkg_data_filename(
data_name, package=None, show_progress=True, remote_timeout=None
):
"""
Retrieves a data file from the standard locations for the package and
provides a local filename for the data.
This function is similar to `get_pkg_data_fileobj` but returns the
file *name* instead of a readable file-like object. This means
that this function must always cache remote files locally, unlike
`get_pkg_data_fileobj`.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for the requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Returns
-------
filename : str
A file path on the local file system corresponding to the data
requested in ``data_name``.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('data/3d_cd.hdr',
... package='astropy.wcs.tests')
>>> with open(fn) as f:
... fcontents = f.read()
...
This retrieves a data file by hash either locally or from the astropy data
server::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP
>>> with open(fn) as f:
... fcontents = f.read()
...
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_fileobj : returns a file-like object with the data
"""
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
if data_name.startswith("hash/"):
# first try looking for a local version if a hash is specified
hashfn = _find_hash_fn(data_name[5:])
if hashfn is None:
return download_file(
conf.dataurl + data_name,
cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
)
else:
return hashfn
else:
fs_path = os.path.normpath(data_name)
datafn = get_pkg_data_path(fs_path, package=package)
if os.path.isdir(datafn):
raise OSError(
"Tried to access a data file that's actually a package data directory"
)
elif os.path.isfile(datafn): # local file
return datafn
else: # remote file
return download_file(
conf.dataurl + data_name,
cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
)
def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations and returns its
contents as a bytes object.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
* A URL to some other file.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
contents : bytes
The complete contents of the file as a bytes object.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
See Also
--------
get_pkg_data_fileobj : returns a file-like object with the data
get_pkg_data_filename : returns a local name for a file containing the data
"""
with get_pkg_data_fileobj(
data_name, package=package, encoding=encoding, cache=cache
) as fd:
contents = fd.read()
return contents
def get_pkg_data_filenames(datadir, package=None, pattern="*"):
"""
Returns the paths of all of the data files in a given directory
that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``.
* Remote URLs are not currently supported.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
Returns
-------
filenames : iterator of str
Paths on the local filesystem in *datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filenames
>>> for fn in get_pkg_data_filenames('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... with open(fn) as f:
... fcontents = f.read()
...
"""
path = get_pkg_data_path(datadir, package=package)
if os.path.isfile(path):
raise OSError(
"Tried to access a data directory that's actually a package data file"
)
elif os.path.isdir(path):
for filename in os.listdir(path):
if fnmatch.fnmatch(filename, pattern):
yield os.path.join(path, filename)
else:
raise OSError("Path not found")
def get_pkg_data_fileobjs(datadir, package=None, pattern="*", encoding=None):
"""
Returns readable file objects for all of the data files in a given
directory that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``
* Remote URLs are not currently supported
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
Returns
-------
fileobjs : iterator of file object
File objects for each of the files on the local filesystem in
*datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobjs
>>> for fd in get_pkg_data_fileobjs('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... fcontents = fd.read()
...
"""
for fn in get_pkg_data_filenames(datadir, package=package, pattern=pattern):
with get_readable_fileobj(fn, encoding=encoding) as fd:
yield fd
def compute_hash(localfn):
"""Computes the MD5 hash for a file.
The hash for a data file is used for looking up data files in a unique
fashion. This is of particular use for tests; a test may require a
particular version of a particular file, in which case it can be accessed
via hash to get the appropriate version.
Typically, if you wish to write a test that requires a particular data
file, you will want to submit that file to the astropy data servers, and
use
e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``,
but with the hash for your file in place of the hash in the example.
Parameters
----------
localfn : str
The path to the file for which the hash should be generated.
Returns
-------
hash : str
The hex digest of the cryptographic hash for the contents of the
``localfn`` file.
"""
with open(localfn, "rb") as f:
h = hashlib.md5()
block = f.read(conf.compute_hash_block_size)
while block:
h.update(block)
block = f.read(conf.compute_hash_block_size)
return h.hexdigest()
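# Illustrative sketch (comments only, not executed; ``example.dat`` is a
# hypothetical file): the digest is the hex MD5 of the file contents, read in
# ``conf.compute_hash_block_size`` chunks.
#
#     >>> compute_hash("example.dat")   # doctest: +SKIP
#     '34c33b3eb0d56eb9462003af249eff28'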
def get_pkg_data_path(*path, package=None):
"""Get path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings for ``os.path`` joining.
package : str or None, optional, keyword-only
If specified, look for a file relative to the given package, rather
than the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
Raises
------
ImportError
Given package or module is not importable.
RuntimeError
If the local data file is outside of the package's tree.
"""
if package is None:
module = find_current_module(1, finddiff=["astropy.utils.data", "contextlib"])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, "__package__") or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if "." in module.__name__:
package = module.__name__.rpartition(".")[0]
else:
package = module.__name__
else:
package = module.__package__
else:
# package errors if it isn't a str
# so there is no need for checks in the containing if/else
module = resolve_name(package)
# module path within package
module_path = os.path.dirname(module.__file__)
full_path = os.path.join(module_path, *path)
# Check that file is inside tree.
rootpkgname = package.partition(".")[0]
rootpkg = resolve_name(rootpkgname)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(full_path, root_dir):
raise RuntimeError(
f"attempted to get a local data file outside of the {rootpkgname} tree."
)
return full_path
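# Illustrative sketch (comments only, not executed): path components are joined
# below the package directory, e.g. for the astropy.wcs test data referenced in
# the docstrings above,
#
#     >>> get_pkg_data_path("data", "3d_cd.hdr", package="astropy.wcs.tests")  # doctest: +SKIP
#     '.../astropy/wcs/tests/data/3d_cd.hdr'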
def _find_hash_fn(hexdigest, pkgname="astropy"):
"""
Looks for a local file by hash - returns file name if found and a valid
file, otherwise returns None.
"""
for v in cache_contents(pkgname=pkgname).values():
if compute_hash(v) == hexdigest:
return v
return None
def get_free_space_in_dir(path, unit=False):
"""
Given a path to a directory, returns the amount of free space
on that filesystem.
Parameters
----------
path : str
The path to a directory.
unit : bool or `~astropy.units.Unit`
Return the amount of free space as Quantity in the given unit,
if provided. Default is `False` for backward-compatibility.
Returns
-------
free_space : int or `~astropy.units.Quantity`
The amount of free space on the partition that the directory is on.
If ``unit=False``, it is returned as plain integer (in bytes).
"""
if not os.path.isdir(path):
raise OSError(
"Can only determine free space associated with directories, not files."
)
# Actually you can on Linux but I want to avoid code that fails
# on Windows only.
free_space = shutil.disk_usage(path).free
if unit:
from astropy import units as u
# TODO: Automatically determine best prefix to use.
if unit is True:
unit = u.byte
free_space = u.Quantity(free_space, u.byte).to(unit)
return free_space
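# Illustrative sketch (comments only, not executed):
#
#     >>> get_free_space_in_dir(".")              # plain int, in bytes
#     >>> get_free_space_in_dir(".", unit=True)   # Quantity in u.byte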
def check_free_space_in_dir(path, size):
"""
Determines if a given directory has enough space to hold a file of
a given size.
Parameters
----------
path : str
The path to a directory.
size : int or `~astropy.units.Quantity`
A proposed filesize. If not a Quantity, assume it is in bytes.
Raises
------
OSError
There is not enough room on the filesystem.
"""
space = get_free_space_in_dir(path, unit=getattr(size, "unit", False))
if space < size:
from astropy.utils.console import human_file_size
raise OSError(
f"Not enough free space in {path} "
f"to download a {human_file_size(size)} file, "
f"only {human_file_size(space)} left"
)
class _ftptlswrapper(urllib.request.ftpwrapper):
def init(self):
self.busy = 0
self.ftp = ftplib.FTP_TLS()
self.ftp.connect(self.host, self.port, self.timeout)
self.ftp.login(self.user, self.passwd)
self.ftp.prot_p()
_target = "/".join(self.dirs)
self.ftp.cwd(_target)
class _FTPTLSHandler(urllib.request.FTPHandler):
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
return _ftptlswrapper(user, passwd, host, port, dirs, timeout, persistent=False)
@functools.lru_cache
def _build_urlopener(ftp_tls=False, ssl_context=None, allow_insecure=False):
"""
Helper that builds a URL opener (via `urllib.request.build_opener`) which handles TLS/SSL.
"""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
ssl_context = dict(it for it in ssl_context) if ssl_context else {}
cert_chain = {}
if "certfile" in ssl_context:
cert_chain.update(
{
"certfile": ssl_context.pop("certfile"),
"keyfile": ssl_context.pop("keyfile", None),
"password": ssl_context.pop("password", None),
}
)
elif "password" in ssl_context or "keyfile" in ssl_context:
raise ValueError(
"passing 'keyfile' or 'password' in the ssl_context argument "
"requires passing 'certfile' as well"
)
if "cafile" not in ssl_context and certifi is not None:
ssl_context["cafile"] = certifi.where()
ssl_context = ssl.create_default_context(**ssl_context)
if allow_insecure:
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
if cert_chain:
ssl_context.load_cert_chain(**cert_chain)
https_handler = urllib.request.HTTPSHandler(context=ssl_context)
if ftp_tls:
urlopener = urllib.request.build_opener(_FTPTLSHandler(), https_handler)
else:
urlopener = urllib.request.build_opener(https_handler)
return urlopener
def _try_url_open(
source_url,
timeout=None,
http_headers=None,
ftp_tls=False,
ssl_context=None,
allow_insecure=False,
):
"""Helper for opening a URL while handling TLS/SSL verification issues."""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
# Always try first with a secure connection
# _build_urlopener uses lru_cache, so the ssl_context argument must be
# converted to a hashable type (a frozenset of 2-tuples)
ssl_context = frozenset(ssl_context.items() if ssl_context else [])
urlopener = _build_urlopener(
ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=False
)
req = urllib.request.Request(source_url, headers=http_headers)
try:
return urlopener.open(req, timeout=timeout)
except urllib.error.URLError as exc:
reason = exc.reason
if (
isinstance(reason, ssl.SSLError)
and reason.reason == "CERTIFICATE_VERIFY_FAILED"
):
msg = (
f"Verification of TLS/SSL certificate at {source_url} "
"failed: this can mean either the server is "
"misconfigured or your local root CA certificates are "
"out-of-date; in the latter case this can usually be "
'addressed by installing the Python package "certifi" '
"(see the documentation for astropy.utils.data.download_url)"
)
if not allow_insecure:
msg += (
" or in both cases you can work around this by "
"passing allow_insecure=True, but only if you "
"understand the implications; the original error "
f"was: {reason}"
)
raise urllib.error.URLError(msg)
else:
msg += ". Re-trying with allow_insecure=True."
warn(msg, AstropyWarning)
# Try again with a new urlopener allowing insecure connections
urlopener = _build_urlopener(
ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=True
)
return urlopener.open(req, timeout=timeout)
raise
def _download_file_from_source(
source_url,
show_progress=True,
timeout=None,
remote_url=None,
cache=False,
pkgname="astropy",
http_headers=None,
ftp_tls=None,
ssl_context=None,
allow_insecure=False,
):
from astropy.utils.console import ProgressBarOrSpinner
if not conf.allow_internet:
raise urllib.error.URLError(
f"URL {remote_url} was supposed to be downloaded but "
f"allow_internet is {conf.allow_internet}; "
"if this is unexpected check the astropy.cfg file for the option "
"allow_internet"
)
if remote_url is None:
remote_url = source_url
if http_headers is None:
http_headers = {}
if ftp_tls is None and urllib.parse.urlparse(remote_url).scheme == "ftp":
try:
return _download_file_from_source(
source_url,
show_progress=show_progress,
timeout=timeout,
remote_url=remote_url,
cache=cache,
pkgname=pkgname,
http_headers=http_headers,
ftp_tls=False,
)
except urllib.error.URLError as e:
# e.reason might not be a string, e.g. socket.gaierror
# URLError changed to report original exception in Python 3.10, 3.11 (bpo-43564)
if str(e.reason).lstrip("ftp error: ").startswith(("error_perm", "5")):
ftp_tls = True
else:
raise
with _try_url_open(
source_url,
timeout=timeout,
http_headers=http_headers,
ftp_tls=ftp_tls,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
) as remote:
info = remote.info()
try:
size = int(info["Content-Length"])
except (KeyError, ValueError, TypeError):
size = None
if size is not None:
check_free_space_in_dir(gettempdir(), size)
if cache:
dldir = _get_download_cache_loc(pkgname)
check_free_space_in_dir(dldir, size)
# If a user has overridden sys.stdout it might not have the
# isatty method, in that case assume it's not a tty
is_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
if show_progress and is_tty:
progress_stream = sys.stdout
else:
progress_stream = io.StringIO()
if source_url == remote_url:
dlmsg = f"Downloading {remote_url}"
else:
dlmsg = f"Downloading {remote_url} from {source_url}"
with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p:
with NamedTemporaryFile(
prefix=f"astropy-download-{os.getpid()}-", delete=False
) as f:
try:
bytes_read = 0
block = remote.read(conf.download_block_size)
while block:
f.write(block)
bytes_read += len(block)
p.update(bytes_read)
block = remote.read(conf.download_block_size)
if size is not None and bytes_read > size:
raise urllib.error.URLError(
f"File was supposed to be {size} bytes but "
f"server provides more, at least {bytes_read} "
"bytes. Download failed."
)
if size is not None and bytes_read < size:
raise urllib.error.ContentTooShortError(
f"File was supposed to be {size} bytes but we "
f"only got {bytes_read} bytes. Download failed.",
content=None,
)
except BaseException:
if os.path.exists(f.name):
try:
os.remove(f.name)
except OSError:
pass
raise
return f.name
def download_file(
remote_url,
cache=False,
show_progress=True,
timeout=None,
sources=None,
pkgname="astropy",
http_headers=None,
ssl_context=None,
allow_insecure=False,
):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
    If ``cache=True`` and the file is present in the cache, the cached
    filename is returned; if the file had to be downloaded, it is added
    to the cache. If ``cache="update"``, the file is always downloaded
    and added to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ssl_context : dict, optional
Keyword arguments to pass to `ssl.create_default_context` when
        downloading from HTTPS or TLS+FTP sources. This can be used to provide
alternative paths to root CA certificates. Additionally, if the key
``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are
included, they are passed to `ssl.SSLContext.load_cert_chain`. This
can be used for performing SSL/TLS client certificate authentication
for servers that require it.
allow_insecure : bool, optional
Allow downloading files over a TLS/SSL connection even when the server
certificate verification failed. When set to `True` the potentially
insecure download is allowed to proceed, but an
`~astropy.utils.exceptions.AstropyWarning` is issued. If you are
frequently getting certificate verification warnings, consider
installing or upgrading `certifi`_ package, which provides frequently
updated certificates for common root CAs (i.e., a set similar to those
used by web browsers). If installed, Astropy will use it
automatically.
.. _certifi: https://pypi.org/project/certifi/
Returns
-------
local_path : str
        Returns the local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
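    Examples
    --------
    A minimal sketch of typical use; the URL below is a placeholder rather
    than a real data file::
    >>> from astropy.utils.data import download_file
    >>> local_path = download_file("https://example.com/data.fits", cache=True)  # doctest: +SKIP
    >>> with open(local_path, "rb") as f:  # doctest: +SKIP
    ...     first_bytes = f.read(80)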
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {"User-Agent": conf.default_http_user_agent, "Accept": "*/*"}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
cache = False
missing_cache = (
f"Cache directory cannot be read or created ({e}), "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(
f"Cache value '{cache}' was requested but "
"'update' is the only recognized string; "
"otherwise use a boolean"
)
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (
hasattr(e, "reason")
and hasattr(e.reason, "errno")
and e.reason.errno == 8
):
e.reason.strerror = f"{e.reason.strerror}. requested URL: {remote_url}"
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
"Please include primary URL in sources if you want it to be "
"included as a valid source."
)
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}"
) from errors[sources[0]]
if cache:
try:
return import_file_to_cache(
url_key,
f_name,
remove_original=True,
replace=(cache == "update"),
pkgname=pkgname,
)
except PermissionError as e:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only ({e}), unable to import "
f"downloaded file, providing data in temporary file {f_name} "
"instead."
)
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
def is_url_in_cache(url_key, pkgname="astropy"):
"""Check if a download for ``url_key`` is in the cache.
The provided ``url_key`` will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
Parameters
----------
url_key : str
The URL retrieved
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
in_cache : bool
`True` if a download for ``url_key`` is in the cache, `False` if not
or if the cache does not exist at all.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
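    Examples
    --------
    A short sketch; the URL is a placeholder and is unlikely to be cached::
    >>> from astropy.utils.data import is_url_in_cache
    >>> is_url_in_cache("https://example.com/data.fits")  # doctest: +SKIP
    False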
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError:
return False
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
return os.path.exists(filename)
def cache_total_size(pkgname="astropy"):
"""Return the total size in bytes of all files in the cache."""
size = 0
dldir = _get_download_cache_loc(pkgname=pkgname)
for root, dirs, files in os.walk(dldir):
size += sum(os.path.getsize(os.path.join(root, name)) for name in files)
return size
def _do_download_files_in_parallel(kwargs):
with astropy.config.paths.set_temp_config(kwargs.pop("temp_config")):
with astropy.config.paths.set_temp_cache(kwargs.pop("temp_cache")):
return download_file(**kwargs)
def download_files_in_parallel(
urls,
cache="update",
show_progress=True,
timeout=None,
sources=None,
multiprocessing_start_method=None,
pkgname="astropy",
):
"""Download multiple files in parallel from the given URLs.
Blocks until all files have downloaded. The result is a list of
local file paths corresponding to the given urls.
The results will be stored in the cache under the values in ``urls`` even
if they are obtained from some other location via ``sources``. See
`~download_file` for details.
Parameters
----------
urls : list of str
The URLs to retrieve.
cache : bool or "update", optional
Whether to use the cache (default is `True`). If "update",
always download the remote URLs to see if new data is available
and store the result in cache.
.. versionchanged:: 4.0
The default was changed to ``"update"`` and setting it to
``False`` will print a Warning and set it to ``"update"`` again,
because the function will not work properly without cache. Using
``True`` will work as expected.
.. versionchanged:: 3.0
The default was changed to ``True`` and setting it to ``False``
will print a Warning and set it to ``True`` again, because the
function will not work properly without cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`)
timeout : float, optional
        Timeout for each individual request in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
sources : dict, optional
If provided, for each URL a list of URLs to try to obtain the
file from. The result will be stored under the original URL.
For any URL in this dictionary, the original URL will *not* be
tried unless it is in this list; this is to prevent long waits
for a primary server that is known to be inaccessible at the
moment.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
paths : list of str
The local file paths corresponding to the downloaded URLs.
Notes
-----
If a URL is unreachable, the downloading will grind to a halt and the
exception will propagate upward, but an unpredictable number of
files will have been successfully downloaded and will remain in
the cache.
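    Examples
    --------
    A minimal sketch; the URLs are placeholders rather than real files::
    >>> from astropy.utils.data import download_files_in_parallel
    >>> urls = ["https://example.com/a.fits", "https://example.com/b.fits"]
    >>> paths = download_files_in_parallel(urls)  # doctest: +SKIP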
"""
from .console import ProgressBar
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = {}
if not cache:
# See issue #6662, on windows won't work because the files are removed
# again before they can be used. On *NIX systems it will behave as if
# cache was set to True because multiprocessing cannot insert the items
# in the list of to-be-removed files. This could be fixed, but really,
# just use the cache, with update_cache if appropriate.
warn(
"Disabling the cache does not work because of multiprocessing, "
'it will be set to ``"update"``. You may need to manually remove '
"the cached files with clear_download_cache() afterwards.",
AstropyWarning,
)
cache = "update"
if show_progress:
progress = sys.stdout
else:
progress = io.BytesIO()
# Combine duplicate URLs
combined_urls = list(set(urls))
combined_paths = ProgressBar.map(
_do_download_files_in_parallel,
[
dict(
remote_url=u,
cache=cache,
show_progress=False,
timeout=timeout,
sources=sources.get(u, None),
pkgname=pkgname,
temp_cache=astropy.config.paths.set_temp_cache._temp_path,
temp_config=astropy.config.paths.set_temp_config._temp_path,
)
for u in combined_urls
],
file=progress,
multiprocess=True,
multiprocessing_start_method=multiprocessing_start_method,
)
paths = []
for url in urls:
paths.append(combined_paths[combined_urls.index(url)])
return paths
# This is used by download_file and _deltemps to determine the files to delete
# when the interpreter exits
_tempfilestodel = []
@atexit.register
def _deltemps():
global _tempfilestodel
if _tempfilestodel is not None:
while len(_tempfilestodel) > 0:
fn = _tempfilestodel.pop()
if os.path.isfile(fn):
try:
os.remove(fn)
except OSError:
# oh well we tried
# could be held open by some process, on Windows
pass
elif os.path.isdir(fn):
try:
shutil.rmtree(fn)
except OSError:
# couldn't get rid of it, sorry
# could be held open by some process, on Windows
pass
def clear_download_cache(hashorurl=None, pkgname="astropy"):
"""Clears the data file cache by deleting the local file(s).
If a URL is provided, it will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
For the purposes of this function, a file can also be identified by a hash
of its contents or by the filename under which the data is stored (as
returned by `~download_file`, for example).
Parameters
----------
hashorurl : str or None
If None, the whole cache is cleared. Otherwise, specify
a hash for the cached file that is supposed to be deleted,
the full path to a file in the cache that should be deleted,
or a URL that should be removed from the cache if present.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
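    Examples
    --------
    A brief sketch; the URL is a placeholder, and the second call clears
    the entire cache::
    >>> from astropy.utils.data import clear_download_cache
    >>> clear_download_cache("https://example.com/data.fits")  # doctest: +SKIP
    >>> clear_download_cache()  # doctest: +SKIP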
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
# Problem arose when trying to open the cache
# Just a warning, though
msg = "Not clearing data cache - cache inaccessible due to "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return
try:
if hashorurl is None:
# Optional: delete old incompatible caches too
_rmtree(dldir)
elif _is_url(hashorurl):
filepath = os.path.join(dldir, _url_to_dirname(hashorurl))
_rmtree(filepath)
else:
# Not a URL, it should be either a filename or a hash
filepath = os.path.join(dldir, hashorurl)
rp = os.path.relpath(filepath, dldir)
if rp.startswith(".."):
raise RuntimeError(
"attempted to use clear_download_cache on the path "
f"{filepath} outside the data cache directory {dldir}"
)
d, f = os.path.split(rp)
if d and f in ["contents", "url"]:
# It's a filename not the hash of a URL
# so we want to zap the directory containing the
# files "url" and "contents"
filepath = os.path.join(dldir, d)
if os.path.exists(filepath):
_rmtree(filepath)
elif len(hashorurl) == 2 * hashlib.md5().digest_size and re.match(
r"[0-9a-f]+", hashorurl
):
# It's the hash of some file contents, we have to find the right file
filename = _find_hash_fn(hashorurl)
if filename is not None:
clear_download_cache(filename)
except OSError as e:
msg = "Not clearing data from cache - problem arose "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
def _get_download_cache_loc(pkgname="astropy"):
"""Finds the path to the cache directory and makes them if they don't exist.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
datadir : str
The path to the data cache directory.
"""
try:
datadir = os.path.join(
astropy.config.paths.get_cache_dir(pkgname), "download", "url"
)
if not os.path.exists(datadir):
try:
os.makedirs(datadir)
except OSError:
if not os.path.exists(datadir):
raise
elif not os.path.isdir(datadir):
raise OSError(f"Data cache directory {datadir} is not a directory")
return datadir
except OSError as e:
msg = "Remote data cache could not be accessed due to "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
raise
def _url_to_dirname(url):
if not _is_url(url):
raise ValueError(f"Malformed URL: '{url}'")
# Make domain names case-insensitive
# Also makes the http:// case-insensitive
urlobj = list(urllib.parse.urlsplit(url))
urlobj[1] = urlobj[1].lower()
if urlobj[0].lower() in ["http", "https"] and urlobj[1] and urlobj[2] == "":
urlobj[2] = "/"
url_c = urllib.parse.urlunsplit(urlobj)
return hashlib.md5(url_c.encode("utf-8")).hexdigest()
class ReadOnlyDict(dict):
def __setitem__(self, key, value):
raise TypeError("This object is read-only.")
_NOTHING = ReadOnlyDict({})
class CacheDamaged(ValueError):
"""Record the URL or file that was a problem.
    Using clear_download_cache on the entries of the ``bad_files`` or
    ``bad_urls`` attribute, whichever is non-empty, should resolve this
    particular problem.
"""
def __init__(self, *args, bad_urls=None, bad_files=None, **kwargs):
super().__init__(*args, **kwargs)
self.bad_urls = bad_urls if bad_urls is not None else []
self.bad_files = bad_files if bad_files is not None else []
def check_download_cache(pkgname="astropy"):
"""Do a consistency check on the cache.
.. note::
Since v5.0, this function no longer returns anything.
Because the cache is shared by all versions of ``astropy`` in all virtualenvs
run by your user, possibly concurrently, it could accumulate problems.
This could lead to hard-to-debug problems or wasted space. This function
detects a number of incorrect conditions, including nonexistent files that
are indexed, files that are indexed but in the wrong place, and, if you
request it, files whose content does not match the hash that is indexed.
    This function also detects files in the cache directory that are not
    indexed. The presence of such files probably indicates that something has
    gone wrong and inaccessible files have accumulated in the cache. These can
    be removed with :func:`clear_download_cache`, either passing the offending
    filename, or with no arguments to empty the entire cache and return it to
    a reasonable, if empty, state.
Parameters
----------
pkgname : str, optional
The package name to use to locate the download cache, i.e., for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Raises
------
`~astropy.utils.data.CacheDamaged`
To indicate a problem with the cache contents; the exception contains
a ``.bad_files`` attribute containing a set of filenames to allow the
user to use :func:`clear_download_cache` to remove the offending items.
OSError, RuntimeError
To indicate some problem with the cache structure. This may need a full
:func:`clear_download_cache` to resolve, or may indicate some kind of
misconfiguration.
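    Examples
    --------
    A hedged sketch of using the raised exception to clean up offending
    entries::
    >>> from astropy.utils.data import (
    ...     CacheDamaged, check_download_cache, clear_download_cache)
    >>> try:  # doctest: +SKIP
    ...     check_download_cache()
    ... except CacheDamaged as exc:
    ...     for bad in exc.bad_files:
    ...         clear_download_cache(bad)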
"""
bad_files = set()
messages = set()
dldir = _get_download_cache_loc(pkgname=pkgname)
with os.scandir(dldir) as it:
for entry in it:
f = os.path.abspath(os.path.join(dldir, entry.name))
if entry.name.startswith("rmtree-"):
if f not in _tempfilestodel:
bad_files.add(f)
messages.add(f"Cache entry {entry.name} not scheduled for deletion")
elif entry.is_dir():
for sf in os.listdir(f):
if sf in ["url", "contents"]:
continue
sf = os.path.join(f, sf)
bad_files.add(sf)
messages.add(f"Unexpected file f{sf}")
urlf = os.path.join(f, "url")
url = None
if not os.path.isfile(urlf):
bad_files.add(urlf)
messages.add(f"Problem with URL file f{urlf}")
else:
url = get_file_contents(urlf, encoding="utf-8")
if not _is_url(url):
bad_files.add(f)
messages.add(f"Malformed URL: {url}")
else:
hashname = _url_to_dirname(url)
if entry.name != hashname:
bad_files.add(f)
messages.add(
f"URL hashes to {hashname} but is stored in"
f" {entry.name}"
)
if not os.path.isfile(os.path.join(f, "contents")):
bad_files.add(f)
if url is None:
messages.add(f"Hash {entry.name} is missing contents")
else:
messages.add(
f"URL {url} with hash {entry.name} is missing contents"
)
else:
bad_files.add(f)
messages.add(f"Left-over non-directory {f} in cache")
if bad_files:
raise CacheDamaged("\n".join(messages), bad_files=bad_files)
@contextlib.contextmanager
def _SafeTemporaryDirectory(suffix=None, prefix=None, dir=None):
"""Temporary directory context manager
This will not raise an exception if the temporary directory goes away
before it's supposed to be deleted. Specifically, what is deleted will
be the directory *name* produced; if no such directory exists, no
exception will be raised.
It would be safer to delete it only if it's really the same directory
- checked by file descriptor - and if it's still called the same thing.
But that opens a platform-specific can of worms.
It would also be more robust to use ExitStack and TemporaryDirectory,
which is more aggressive about removing readonly things.
"""
d = mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield d
finally:
try:
shutil.rmtree(d)
except OSError:
pass
def _rmtree(path, replace=None):
"""More-atomic rmtree. Ignores missing directory."""
with TemporaryDirectory(
prefix="rmtree-", dir=os.path.dirname(os.path.abspath(path))
) as d:
try:
os.rename(path, os.path.join(d, "to-zap"))
except FileNotFoundError:
pass
except PermissionError:
warn(
CacheMissingWarning(
f"Unable to remove directory {path} because a file in it "
"is in use and you are on Windows",
path,
)
)
raise
if replace is not None:
try:
os.rename(replace, path)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
def import_file_to_cache(
url_key, filename, remove_original=False, pkgname="astropy", *, replace=True
):
"""Import the on-disk file specified by filename to the cache.
The provided ``url_key`` will be the name used in the cache. The file
should contain the contents of this URL, at least notionally (the URL may
    be temporarily or permanently unavailable). Users will request these
    contents from the cache using ``url_key``. See :func:`download_file` for
details.
If ``url_key`` already exists in the cache, it will be updated to point to
these imported contents, and its old contents will be deleted from the
cache.
Parameters
----------
url_key : str
The key to index the file under. This should probably be
the URL where the file was located, though if you obtained
it from a mirror you should use the URL of the primary
location.
filename : str
The file whose contents you want to import.
remove_original : bool
Whether to remove the original file (``filename``) once import is
complete.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
replace : boolean, optional
Whether or not to replace an existing object in the cache, if one exists.
If replacement is not requested but the object exists, silently pass.
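    Examples
    --------
    A hedged sketch; the URL and local path are placeholders::
    >>> from astropy.utils.data import import_file_to_cache
    >>> cached = import_file_to_cache("https://example.com/data.fits", "/tmp/data.fits")  # doctest: +SKIP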
"""
cache_dir = _get_download_cache_loc(pkgname=pkgname)
cache_dirname = _url_to_dirname(url_key)
local_dirname = os.path.join(cache_dir, cache_dirname)
local_filename = os.path.join(local_dirname, "contents")
with _SafeTemporaryDirectory(prefix="temp_dir", dir=cache_dir) as temp_dir:
temp_filename = os.path.join(temp_dir, "contents")
# Make sure we're on the same filesystem
# This will raise an exception if the url_key doesn't turn into a valid filename
shutil.copy(filename, temp_filename)
with open(os.path.join(temp_dir, "url"), "w", encoding="utf-8") as f:
f.write(url_key)
if replace:
_rmtree(local_dirname, replace=temp_dir)
else:
try:
os.rename(temp_dir, local_dirname)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
if remove_original:
os.remove(filename)
return os.path.abspath(local_filename)
def get_cached_urls(pkgname="astropy"):
"""
Get the list of URLs in the cache. Especially useful for looking up what
files are stored in your cache when you don't have internet access.
The listed URLs are the keys programs should use to access the file
contents, but those contents may have actually been obtained from a mirror.
See `~download_file` for details.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
cached_urls : list
List of cached URLs.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
"""
return sorted(cache_contents(pkgname=pkgname).keys())
def cache_contents(pkgname="astropy"):
"""Obtain a dict mapping cached URLs to filenames.
This dictionary is a read-only snapshot of the state of the cache when this
function was called. If other processes are actively working with the
cache, it is possible for them to delete files that are listed in this
dictionary. Use with some caution if you are working on a system that is
busy with many running astropy processes, although the same issues apply to
most functions in this module.
"""
r = {}
try:
dldir = _get_download_cache_loc(pkgname=pkgname)
except OSError:
return _NOTHING
with os.scandir(dldir) as it:
for entry in it:
            if entry.is_dir():
url = get_file_contents(
os.path.join(dldir, entry.name, "url"), encoding="utf-8"
)
r[url] = os.path.abspath(os.path.join(dldir, entry.name, "contents"))
return ReadOnlyDict(r)
def export_download_cache(
filename_or_obj, urls=None, overwrite=False, pkgname="astropy"
):
"""Exports the cache contents as a ZIP file.
Parameters
----------
filename_or_obj : str or file-like
Where to put the created ZIP file. Must be something the zipfile
module can write to.
urls : iterable of str or None
The URLs to include in the exported cache. The default is all
URLs currently in the cache. If a URL is included in this list
but is not currently in the cache, a KeyError will be raised.
To ensure that all are in the cache use `~download_file`
or `~download_files_in_parallel`.
overwrite : bool, optional
If filename_or_obj is a filename that exists, it will only be
overwritten if this is True.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
import_download_cache : import the contents of such a ZIP file
import_file_to_cache : import a single file directly
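    Examples
    --------
    A sketch of exporting the whole cache to a ZIP file; the filename is
    arbitrary::
    >>> from astropy.utils.data import export_download_cache
    >>> export_download_cache("astropy_cache.zip", overwrite=True)  # doctest: +SKIP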
"""
if urls is None:
urls = get_cached_urls(pkgname)
with zipfile.ZipFile(filename_or_obj, "w" if overwrite else "x") as z:
for u in urls:
fn = download_file(u, cache=True, sources=[], pkgname=pkgname)
# Do not use os.path.join because ZIP files want
# "/" on all platforms
z_fn = urllib.parse.quote(u, safe="")
z.write(fn, z_fn)
def import_download_cache(
filename_or_obj, urls=None, update_cache=False, pkgname="astropy"
):
"""Imports the contents of a ZIP file into the cache.
Each member of the ZIP file should be named by a quoted version of the
URL whose contents it stores. These names are decoded with
:func:`~urllib.parse.unquote`.
Parameters
----------
filename_or_obj : str or file-like
Where the stored ZIP file is. Must be something the :mod:`~zipfile`
module can read from.
urls : set of str or list of str or None
The URLs to import from the ZIP file. The default is all
URLs in the file.
update_cache : bool, optional
If True, any entry in the ZIP file will overwrite the value in the
cache; if False, leave untouched any entry already in the cache.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
    export_download_cache : export the contents of the cache to such a ZIP file
import_file_to_cache : import a single file directly
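    Examples
    --------
    A sketch of restoring a previously exported cache; the filename is
    arbitrary::
    >>> from astropy.utils.data import import_download_cache
    >>> import_download_cache("astropy_cache.zip", update_cache=True)  # doctest: +SKIP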
"""
with zipfile.ZipFile(filename_or_obj, "r") as z, TemporaryDirectory() as d:
for i, zf in enumerate(z.infolist()):
url = urllib.parse.unquote(zf.filename)
# FIXME(aarchiba): do we want some kind of validation on this URL?
# urllib.parse might do something sensible...but what URLs might
# they have?
# is_url in this file is probably a good check, not just here
# but throughout this file.
if urls is not None and url not in urls:
continue
if not update_cache and is_url_in_cache(url, pkgname=pkgname):
continue
f_temp_name = os.path.join(d, str(i))
with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
block = f_zip.read(conf.download_block_size)
while block:
f_temp.write(block)
block = f_zip.read(conf.download_block_size)
import_file_to_cache(
url, f_temp_name, remove_original=True, pkgname=pkgname
)
|
7fc882edf9766ba9e34b6d684bc70b306bf1a22227e65491f46318dd3f1f308d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements a class based on NDData with all Mixins.
"""
from .mixins.ndarithmetic import NDArithmeticMixin
from .mixins.ndio import NDIOMixin
from .mixins.ndslicing import NDSlicingMixin
from .nddata import NDData
__all__ = ["NDDataRef"]
class NDDataRef(NDArithmeticMixin, NDIOMixin, NDSlicingMixin, NDData):
"""Implements `NDData` with all Mixins.
This class implements a `NDData`-like container that supports reading and
writing as implemented in the ``astropy.io.registry`` and also slicing
(indexing) and simple arithmetic (add, subtract, divide and multiply).
Notes
-----
A key distinction from `NDDataArray` is that this class does not attempt
to provide anything that was not defined in any of the parent classes.
See also
--------
NDData
NDArithmeticMixin
NDSlicingMixin
NDIOMixin
Examples
--------
    The mixins allow operations that are not possible with `NDData` or
`NDDataBase`, i.e. simple arithmetic::
>>> from astropy.nddata import NDDataRef, StdDevUncertainty
>>> import numpy as np
>>> data = np.ones((3,3), dtype=float)
>>> ndd1 = NDDataRef(data, uncertainty=StdDevUncertainty(data))
>>> ndd2 = NDDataRef(data, uncertainty=StdDevUncertainty(data))
>>> ndd3 = ndd1.add(ndd2)
>>> ndd3.data # doctest: +FLOAT_CMP
array([[2., 2., 2.],
[2., 2., 2.],
[2., 2., 2.]])
>>> ndd3.uncertainty.array # doctest: +FLOAT_CMP
array([[1.41421356, 1.41421356, 1.41421356],
[1.41421356, 1.41421356, 1.41421356],
[1.41421356, 1.41421356, 1.41421356]])
see `NDArithmeticMixin` for a complete list of all supported arithmetic
operations.
But also slicing (indexing) is possible::
>>> ndd4 = ndd3[1,:]
>>> ndd4.data # doctest: +FLOAT_CMP
array([2., 2., 2.])
>>> ndd4.uncertainty.array # doctest: +FLOAT_CMP
array([1.41421356, 1.41421356, 1.41421356])
    See `NDSlicingMixin` for a description of how slicing works (which
    attributes are sliced).
"""
pass
|
b57c782ad7b96933323fe00edd04c560115e80077df2012f9ad4c75cadcbcb03 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import numpy as np
import astropy.units as u
from astropy.coordinates import ITRS, CartesianRepresentation, SphericalRepresentation
from astropy.utils import unbroadcast
from .wcs import WCS, WCSSUB_LATITUDE, WCSSUB_LONGITUDE
__doctest_skip__ = ["wcs_to_celestial_frame", "celestial_frame_to_wcs"]
__all__ = [
"obsgeo_to_frame",
"add_stokes_axis_to_wcs",
"celestial_frame_to_wcs",
"wcs_to_celestial_frame",
"proj_plane_pixel_scales",
"proj_plane_pixel_area",
"is_proj_plane_distorted",
"non_celestial_pixel_scales",
"skycoord_to_pixel",
"pixel_to_skycoord",
"custom_wcs_to_frame_mappings",
"custom_frame_to_wcs_mappings",
"pixel_to_pixel",
"local_partial_pixel_derivatives",
"fit_wcs_from_points",
]
def add_stokes_axis_to_wcs(wcs, add_before_ind):
"""
Add a new Stokes axis that is uncorrelated with any other axes.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to add to
add_before_ind : int
Index of the WCS to insert the new Stokes axis in front of.
To add at the end, do add_before_ind = wcs.wcs.naxis
The beginning is at position 0.
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with an additional axis
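    Examples
    --------
    A small sketch adding a Stokes axis at the end of a hand-built
    2-dimensional WCS::
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import add_stokes_axis_to_wcs
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    >>> new_wcs = add_stokes_axis_to_wcs(wcs, wcs.wcs.naxis)
    >>> new_wcs.wcs.ctype[2]
    'STOKES'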
"""
inds = [i + 1 for i in range(wcs.wcs.naxis)]
inds.insert(add_before_ind, 0)
newwcs = wcs.sub(inds)
newwcs.wcs.ctype[add_before_ind] = "STOKES"
newwcs.wcs.cname[add_before_ind] = "STOKES"
return newwcs
def _wcs_to_celestial_frame_builtin(wcs):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import (
FK4,
FK5,
ICRS,
ITRS,
FK4NoETerms,
Galactic,
SphericalRepresentation,
)
# Import astropy.time here otherwise setup.py fails before extensions are compiled
from astropy.time import Time
if wcs.wcs.lng == -1 or wcs.wcs.lat == -1:
return None
radesys = wcs.wcs.radesys
if np.isnan(wcs.wcs.equinox):
equinox = None
else:
equinox = wcs.wcs.equinox
xcoord = wcs.wcs.ctype[wcs.wcs.lng][:4]
ycoord = wcs.wcs.ctype[wcs.wcs.lat][:4]
# Apply logic from FITS standard to determine the default radesys
if radesys == "" and xcoord == "RA--" and ycoord == "DEC-":
if equinox is None:
radesys = "ICRS"
elif equinox < 1984.0:
radesys = "FK4"
else:
radesys = "FK5"
if radesys == "FK4":
if equinox is not None:
equinox = Time(equinox, format="byear")
frame = FK4(equinox=equinox)
elif radesys == "FK4-NO-E":
if equinox is not None:
equinox = Time(equinox, format="byear")
frame = FK4NoETerms(equinox=equinox)
elif radesys == "FK5":
if equinox is not None:
equinox = Time(equinox, format="jyear")
frame = FK5(equinox=equinox)
elif radesys == "ICRS":
frame = ICRS()
else:
if xcoord == "GLON" and ycoord == "GLAT":
frame = Galactic()
elif xcoord == "TLON" and ycoord == "TLAT":
# The default representation for ITRS is cartesian, but for WCS
# purposes, we need the spherical representation.
frame = ITRS(
representation_type=SphericalRepresentation,
obstime=wcs.wcs.dateobs or None,
)
else:
frame = None
return frame
def _celestial_frame_to_wcs_builtin(frame, projection="TAN"):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import (
FK4,
FK5,
ICRS,
ITRS,
BaseRADecFrame,
FK4NoETerms,
Galactic,
)
# Create a 2-dimensional WCS
wcs = WCS(naxis=2)
if isinstance(frame, BaseRADecFrame):
xcoord = "RA--"
ycoord = "DEC-"
if isinstance(frame, ICRS):
wcs.wcs.radesys = "ICRS"
elif isinstance(frame, FK4NoETerms):
wcs.wcs.radesys = "FK4-NO-E"
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK4):
wcs.wcs.radesys = "FK4"
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK5):
wcs.wcs.radesys = "FK5"
wcs.wcs.equinox = frame.equinox.jyear
else:
return None
elif isinstance(frame, Galactic):
xcoord = "GLON"
ycoord = "GLAT"
elif isinstance(frame, ITRS):
xcoord = "TLON"
ycoord = "TLAT"
wcs.wcs.radesys = "ITRS"
wcs.wcs.dateobs = frame.obstime.utc.isot
else:
return None
wcs.wcs.ctype = [xcoord + "-" + projection, ycoord + "-" + projection]
return wcs
WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]]
FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]]
class custom_wcs_to_frame_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, "__call__"):
mappings = [mappings]
WCS_FRAME_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
WCS_FRAME_MAPPINGS.pop()
# Backward-compatibility
custom_frame_mappings = custom_wcs_to_frame_mappings
class custom_frame_to_wcs_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, "__call__"):
mappings = [mappings]
FRAME_WCS_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
FRAME_WCS_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
        subclass that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
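    Examples
    --------
    A small sketch for a hand-built equatorial WCS::
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import wcs_to_celestial_frame
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    >>> wcs_to_celestial_frame(wcs)
    <ICRS Frame>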
"""
for mapping_set in WCS_FRAME_MAPPINGS:
for func in mapping_set:
frame = func(wcs)
if frame is not None:
return frame
raise ValueError(
"Could not determine celestial frame corresponding to the specified WCS object"
)
def celestial_frame_to_wcs(frame, projection="TAN"):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
        subclass for which to find the WCS
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
for mapping_set in FRAME_WCS_MAPPINGS:
for func in mapping_set:
wcs = func(frame, projection=projection)
if wcs is not None:
return wcs
raise ValueError(
"Could not determine WCS corresponding to the specified coordinate frame."
)
def proj_plane_pixel_scales(wcs):
"""
For a WCS returns pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
        the non-linear nature of most projections.
.. note::
In order to compute the scales corresponding to celestial axes only,
make sure that the input `~astropy.wcs.WCS` object contains
celestial axes only, e.g., by passing in the
`~astropy.wcs.WCS.celestial` WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
scale : ndarray
A vector (`~numpy.ndarray`) of projection plane increments
corresponding to each pixel side (axis). The units of the returned
results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
`~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for
the celestial WCS and can be obtained by inquiring the value
of `~astropy.wcs.Wcsprm.cunit` property of the input
`~astropy.wcs.WCS` WCS object.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
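    Examples
    --------
    A minimal sketch for a hand-built WCS with a diagonal pixel scale matrix::
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import proj_plane_pixel_scales
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.cdelt = [-0.1, 0.1]
    >>> proj_plane_pixel_scales(wcs)  # doctest: +FLOAT_CMP
    array([0.1, 0.1])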
"""
return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float))
def proj_plane_pixel_area(wcs):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
        the non-linear nature of most projections.
.. note::
In order to compute the area of pixels corresponding to celestial
axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
object of the input ``wcs``. This is different from the
`~astropy.wcs.utils.proj_plane_pixel_scales` function
that computes the scales for the axes of the input WCS itself.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
area : float
Area (in the projection plane) of the pixel at ``CRPIX`` location.
The units of the returned result are the same as the units of
the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`,
and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be
obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit`
property of the `~astropy.wcs.WCS.celestial` WCS object.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
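    Examples
    --------
    A minimal sketch for a hand-built celestial WCS with 0.1 degree pixels::
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import proj_plane_pixel_area
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    >>> wcs.wcs.cdelt = [-0.1, 0.1]
    >>> float(proj_plane_pixel_area(wcs))  # doctest: +FLOAT_CMP
    0.01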
"""
psm = wcs.celestial.pixel_scale_matrix
if psm.shape != (2, 2):
raise ValueError("Pixel area is defined only for 2D pixels.")
return np.abs(np.linalg.det(psm))
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
r"""
For a WCS returns `False` if square image (detector) pixels stay square
when projected onto the "plane of intermediate world coordinates"
as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
It will return `True` if transformation from image (detector) coordinates
to the focal plane coordinates is non-orthogonal or if WCS contains
non-linear (e.g., SIP) distortions.
.. note::
Since this function is concerned **only** about the transformation
"image plane"->"focal plane" and **not** about the transformation
"celestial sphere"->"focal plane"->"image plane",
        this function ignores distortions arising due to the non-linear nature
of most projections.
Let's denote by *C* either the original or the reconstructed
(from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted`
verifies that the transformation from image (detector) coordinates
to the focal plane coordinates is orthogonal using the following
check:
.. math::
\left \| \frac{C \cdot C^{\mathrm{T}}}
{| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .
Parameters
----------
wcs : `~astropy.wcs.WCS`
World coordinate system object
maxerr : float, optional
Accuracy to which the CD matrix, **normalized** such
that :math:`|det(CD)|=1`, should be close to being an
orthogonal matrix as described in the above equation
(see :math:`\epsilon`).
Returns
-------
distorted : bool
Returns `True` if focal (projection) plane is distorted and `False`
otherwise.
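    Examples
    --------
    A small sketch for a hand-built, orthogonal WCS without distortions::
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import is_proj_plane_distorted
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    >>> wcs.wcs.cdelt = [-0.1, 0.1]
    >>> is_proj_plane_distorted(wcs)
    False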
"""
cwcs = wcs.celestial
return not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or _has_distortion(cwcs) # fmt: skip
def _is_cd_orthogonal(cd, maxerr):
shape = cd.shape
if not (len(shape) == 2 and shape[0] == shape[1]):
raise ValueError("CD (or PC) matrix must be a 2D square matrix.")
pixarea = np.abs(np.linalg.det(cd))
if pixarea == 0.0:
raise ValueError("CD (or PC) matrix is singular.")
# NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
# However, I am not aware of complex CD/PC matrices...
I = np.dot(cd, cd.T) / pixarea
cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))
return cd_unitary_err < maxerr
def non_celestial_pixel_scales(inwcs):
"""
Calculate the pixel scale along each axis of a non-celestial WCS,
for example one with mixed spectral and spatial axes.
Parameters
----------
inwcs : `~astropy.wcs.WCS`
The world coordinate system object.
Returns
-------
scale : `numpy.ndarray`
The pixel scale along each axis.
"""
if inwcs.is_celestial:
raise ValueError("WCS is celestial, use celestial_pixel_scales instead")
pccd = inwcs.pixel_scale_matrix
if np.allclose(np.extract(1 - np.eye(*pccd.shape), pccd), 0):
return np.abs(np.diagonal(pccd)) * u.deg
else:
raise ValueError("WCS is rotated, cannot determine consistent pixel scales")
def _has_distortion(wcs):
"""
`True` if contains any SIP or image distortion components.
"""
return any(
getattr(wcs, dist_attr) is not None
for dist_attr in ["cpdis1", "cpdis2", "det2im1", "det2im2", "sip"]
)
# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.
def skycoord_to_pixel(coords, wcs, origin=0, mode="all"):
"""
Convert a set of SkyCoord coordinates into pixels.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
        including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
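    Examples
    --------
    A hedged sketch with a hand-built TAN projection; the reference values
    are arbitrary::
    >>> from astropy.coordinates import SkyCoord
    >>> from astropy.wcs import WCS
    >>> from astropy.wcs.utils import skycoord_to_pixel
    >>> wcs = WCS(naxis=2)
    >>> wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    >>> wcs.wcs.crval = [10.0, 20.0]
    >>> wcs.wcs.crpix = [50.0, 50.0]
    >>> wcs.wcs.cdelt = [-0.001, 0.001]
    >>> wcs.wcs.cunit = ["deg", "deg"]
    >>> coord = SkyCoord(10.0, 20.0, unit="deg")
    >>> xp, yp = skycoord_to_pixel(coord, wcs)
    >>> float(xp), float(yp)  # doctest: +FLOAT_CMP
    (49.0, 49.0)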
"""
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS needs
xw_unit = u.Unit(wcs.wcs.cunit[0])
yw_unit = u.Unit(wcs.wcs.cunit[1])
# Convert positions to frame
coords = coords.transform_to(frame)
# Extract longitude and latitude. We first try and use lon/lat directly,
# but if the representation is not spherical or unit spherical this will
# fail. We should then force the use of the unit spherical
# representation. We don't do that directly to make sure that we preserve
# custom lon/lat representations if available.
try:
lon = coords.data.lon.to(xw_unit)
lat = coords.data.lat.to(yw_unit)
except AttributeError:
lon = coords.spherical.lon.to(xw_unit)
lat = coords.spherical.lat.to(yw_unit)
# Convert to pixel coordinates
if mode == "all":
xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
elif mode == "wcs":
xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
return xp, yp
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode="all", cls=None):
"""
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
        including only the core WCS transformation (``'wcs'``).
cls : class or None
The class of object to create. Should be a
`~astropy.coordinates.SkyCoord` subclass. If None, defaults to
`~astropy.coordinates.SkyCoord`.
Returns
-------
coords : `~astropy.coordinates.SkyCoord` subclass
        The celestial coordinates, returned as an instance of the given ``cls``.
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord, UnitSphericalRepresentation
# we have to do this instead of actually setting the default to SkyCoord
# because importing SkyCoord at the module-level leads to circular
# dependencies.
if cls is None:
cls = SkyCoord
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS gives
lon_unit = u.Unit(wcs.wcs.cunit[0])
lat_unit = u.Unit(wcs.wcs.cunit[1])
# Convert pixel coordinates to celestial coordinates
if mode == "all":
lon, lat = wcs.all_pix2world(xp, yp, origin)
elif mode == "wcs":
lon, lat = wcs.wcs_pix2world(xp, yp, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
# Add units to longitude/latitude
lon = lon * lon_unit
lat = lat * lat_unit
# Create a SkyCoord-like object
data = UnitSphericalRepresentation(lon=lon, lat=lat)
coords = cls(frame.realize_frame(data))
return coords
def _unique_with_order_preserved(items):
"""
Return a list of unique items in the list provided, preserving the order
in which they are found.
"""
new_items = []
for item in items:
if item not in new_items:
new_items.append(item)
return new_items
def _pixel_to_world_correlation_matrix(wcs):
"""
Return a correlation matrix between the pixel coordinates and the
high level world coordinates, along with the list of high level world
coordinate classes.
The shape of the matrix is ``(n_world, n_pix)``, where ``n_world`` is the
number of high level world coordinates.
"""
# We basically want to collapse the world dimensions together that are
# combined into the same high-level objects.
# Get the following in advance as getting these properties can be expensive
all_components = wcs.low_level_wcs.world_axis_object_components
all_classes = wcs.low_level_wcs.world_axis_object_classes
axis_correlation_matrix = wcs.low_level_wcs.axis_correlation_matrix
components = _unique_with_order_preserved([c[0] for c in all_components])
matrix = np.zeros((len(components), wcs.pixel_n_dim), dtype=bool)
for iworld in range(wcs.world_n_dim):
iworld_unique = components.index(all_components[iworld][0])
matrix[iworld_unique] |= axis_correlation_matrix[iworld]
classes = [all_classes[component][0] for component in components]
return matrix, classes
def _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out):
"""
Correlation matrix between the input and output pixel coordinates for a
pixel -> world -> pixel transformation specified by two WCS instances.
The first WCS specified is the one used for the pixel -> world
transformation and the second WCS specified is the one used for the world ->
pixel transformation. The shape of the matrix is
``(n_pixel_out, n_pixel_in)``.
"""
matrix1, classes1 = _pixel_to_world_correlation_matrix(wcs_in)
matrix2, classes2 = _pixel_to_world_correlation_matrix(wcs_out)
if len(classes1) != len(classes2):
raise ValueError("The two WCS return a different number of world coordinates")
# Check if classes match uniquely
unique_match = True
mapping = []
for class1 in classes1:
matches = classes2.count(class1)
if matches == 0:
raise ValueError("The world coordinate types of the two WCS do not match")
elif matches > 1:
unique_match = False
break
else:
mapping.append(classes2.index(class1))
if unique_match:
# Classes are unique, so we need to re-order matrix2 along the world
# axis using the mapping we found above.
matrix2 = matrix2[mapping]
elif classes1 != classes2:
raise ValueError(
"World coordinate order doesn't match and automatic matching is ambiguous"
)
matrix = np.matmul(matrix2.T, matrix1)
return matrix
def _split_matrix(matrix):
"""
Given an axis correlation matrix from a WCS object, return information about
the individual WCS that can be split out.
The output is a list of tuples, where each tuple contains a list of
pixel dimensions and a list of world dimensions that can be extracted to
form a new WCS. For example, in the case of a spectral cube with the first
two world coordinates being the celestial coordinates and the third
coordinate being an uncorrelated spectral axis, the matrix would look like::
array([[ True, True, False],
[ True, True, False],
[False, False, True]])
and this function will return ``[([0, 1], [0, 1]), ([2], [2])]``.
"""
pixel_used = []
split_info = []
for ipix in range(matrix.shape[1]):
if ipix in pixel_used:
continue
pixel_include = np.zeros(matrix.shape[1], dtype=bool)
pixel_include[ipix] = True
n_pix_prev, n_pix = 0, 1
while n_pix > n_pix_prev:
world_include = matrix[:, pixel_include].any(axis=1)
pixel_include = matrix[world_include, :].any(axis=0)
n_pix_prev, n_pix = n_pix, np.sum(pixel_include)
pixel_indices = list(np.nonzero(pixel_include)[0])
world_indices = list(np.nonzero(world_include)[0])
pixel_used.extend(pixel_indices)
split_info.append((pixel_indices, world_indices))
return split_info
def pixel_to_pixel(wcs_in, wcs_out, *inputs):
"""
Transform pixel coordinates in a dataset with a WCS to pixel coordinates
in another dataset with a different WCS.
This function is designed to efficiently deal with input pixel arrays that
are broadcasted views of smaller arrays, and is compatible with any
APE14-compliant WCS.
Parameters
----------
wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the original dataset which complies with the
high-level shared APE 14 WCS API.
wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the target dataset which complies with the
high-level shared APE 14 WCS API.
*inputs :
Scalars or arrays giving the pixel coordinates to transform.
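    Examples
    --------
    A minimal sketch; the two WCS objects below use invented header values
    purely for illustration::
        from astropy.wcs import WCS
        from astropy.wcs.utils import pixel_to_pixel
        wcs1 = WCS(naxis=2)
        wcs1.wcs.ctype = ["RA---TAN", "DEC--TAN"]
        wcs1.wcs.crval = [10.0, 20.0]
        wcs1.wcs.crpix = [50.0, 50.0]
        wcs1.wcs.cdelt = [-0.001, 0.001]
        wcs2 = WCS(naxis=2)
        wcs2.wcs.ctype = ["RA---TAN", "DEC--TAN"]
        wcs2.wcs.crval = [10.0, 20.0]
        wcs2.wcs.crpix = [25.0, 25.0]
        wcs2.wcs.cdelt = [-0.002, 0.002]
        # x/y in wcs1 pixels -> x/y in wcs2 pixels
        x2, y2 = pixel_to_pixel(wcs1, wcs2, 10.0, 30.0)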
"""
# Shortcut for scalars
if np.isscalar(inputs[0]):
world_outputs = wcs_in.pixel_to_world(*inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
return wcs_out.world_to_pixel(*world_outputs)
# Remember original shape
original_shape = inputs[0].shape
matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
split_info = _split_matrix(matrix)
outputs = [None] * wcs_out.pixel_n_dim
for pixel_in_indices, pixel_out_indices in split_info:
pixel_inputs = []
for ipix in range(wcs_in.pixel_n_dim):
if ipix in pixel_in_indices:
pixel_inputs.append(unbroadcast(inputs[ipix]))
else:
pixel_inputs.append(inputs[ipix].flat[0])
pixel_inputs = np.broadcast_arrays(*pixel_inputs)
world_outputs = wcs_in.pixel_to_world(*pixel_inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
pixel_outputs = wcs_out.world_to_pixel(*world_outputs)
if wcs_out.pixel_n_dim == 1:
pixel_outputs = (pixel_outputs,)
for ipix in range(wcs_out.pixel_n_dim):
if ipix in pixel_out_indices:
outputs[ipix] = np.broadcast_to(pixel_outputs[ipix], original_shape)
return outputs[0] if wcs_out.pixel_n_dim == 1 else outputs
def local_partial_pixel_derivatives(wcs, *pixel, normalize_by_world=False):
"""
Return a matrix of shape ``(world_n_dim, pixel_n_dim)`` where each entry
``[i, j]`` is the partial derivative d(world_i)/d(pixel_j) at the requested
pixel position.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS transformation to evaluate the derivatives for.
*pixel : float
The scalar pixel coordinates at which to evaluate the derivatives.
normalize_by_world : bool
If `True`, the matrix is normalized so that for each world entry
the derivatives add up to 1.
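    Examples
    --------
    A minimal sketch (header values invented for illustration)::
        from astropy.wcs import WCS
        from astropy.wcs.utils import local_partial_pixel_derivatives
        wcs = WCS(naxis=2)
        wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
        wcs.wcs.crval = [30.0, 45.0]
        wcs.wcs.crpix = [100.0, 100.0]
        wcs.wcs.cdelt = [-0.01, 0.01]
        # 2x2 matrix of d(world_i)/d(pixel_j) evaluated at pixel (50, 60)
        derivs = local_partial_pixel_derivatives(wcs, 50, 60)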
"""
# Find the world coordinates at the requested pixel
pixel_ref = np.array(pixel)
world_ref = np.array(wcs.pixel_to_world_values(*pixel_ref))
# Set up the derivative matrix
derivatives = np.zeros((wcs.world_n_dim, wcs.pixel_n_dim))
for i in range(wcs.pixel_n_dim):
pixel_off = pixel_ref.copy()
pixel_off[i] += 1
world_off = np.array(wcs.pixel_to_world_values(*pixel_off))
derivatives[:, i] = world_off - world_ref
    if normalize_by_world:
        # Normalize so that the derivatives for each world coordinate sum to 1
        derivatives /= derivatives.sum(axis=1)[:, np.newaxis]
return derivatives
def _linear_wcs_fit(params, lon, lat, x, y, w_obj):
"""
Objective function for fitting linear terms.
Parameters
----------
params : array
        6 element array. The first 4 elements are the flattened CD matrix,
        the last 2 are CRPIX.
lon, lat: array
Sky coordinates.
x, y: array
Pixel coordinates
w_obj: `~astropy.wcs.WCS`
WCS object
"""
cd = params[0:4]
crpix = params[4:6]
w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))
w_obj.wcs.crpix = crpix
lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)
lat_resids = lat - lat2
lon_resids = lon - lon2
# In case the longitude has wrapped around
lon_resids = np.mod(lon_resids - 180.0, 360.0) - 180.0
resids = np.concatenate((lon_resids * np.cos(np.radians(lat)), lat_resids))
return resids
def _sip_fit(params, lon, lat, u, v, w_obj, order, coeff_names):
"""Objective function for fitting SIP.
Parameters
----------
    params : array
        Fittable parameters: the first 2 elements are CRPIX, the next 4 are the
        flattened CD matrix, and the remainder are the SIP ``A`` and ``B``
        polynomial coefficients.
    lon, lat: array
        Sky coordinates.
    u, v: array
        Pixel coordinates
    w_obj: `~astropy.wcs.WCS`
        WCS object
    order: int
        SIP polynomial order.
    coeff_names: list of str
        Names of the SIP polynomial coefficients being fit (e.g. ``'2_0'``).
"""
from astropy.modeling.models import SIP # here to avoid circular import
# unpack params
crpix = params[0:2]
cdx = params[2:6].reshape((2, 2))
a_params = params[6 : 6 + len(coeff_names)]
b_params = params[6 + len(coeff_names) :]
# assign to wcs, used for transformations in this function
w_obj.wcs.cd = cdx
w_obj.wcs.crpix = crpix
a_coeff, b_coeff = {}, {}
for i in range(len(coeff_names)):
a_coeff["A_" + coeff_names[i]] = a_params[i]
b_coeff["B_" + coeff_names[i]] = b_params[i]
sip = SIP(
crpix=crpix, a_order=order, b_order=order, a_coeff=a_coeff, b_coeff=b_coeff
)
fuv, guv = sip(u, v)
xo, yo = np.dot(cdx, np.array([u + fuv - crpix[0], v + guv - crpix[1]]))
    # use all_world2pix in case `projection` contains a distortion table
x, y = w_obj.all_world2pix(lon, lat, 0)
x, y = np.dot(w_obj.wcs.cd, (x - w_obj.wcs.crpix[0], y - w_obj.wcs.crpix[1]))
resids = np.concatenate((x - xo, y - yo))
return resids
def fit_wcs_from_points(
xy, world_coords, proj_point="center", projection="TAN", sip_degree=None
):
"""
Given two matching sets of coordinates on detector and sky,
compute the WCS.
Fits a WCS object to matched set of input detector and sky coordinates.
Optionally, a SIP can be fit to account for geometric
distortion. Returns an `~astropy.wcs.WCS` object with the best fit
parameters for mapping between input pixel and sky coordinates.
    The projection type (default 'TAN') can be passed in as a string, one of
the valid three-letter projection codes - or as a WCS object with
projection keywords already set. Note that if an input WCS has any
non-polynomial distortion, this will be applied and reflected in the
fit terms and coefficients. Passing in a WCS object in this way essentially
allows it to be refit based on the matched input coordinates and projection
point, but take care when using this option as non-projection related
keywords in the input might cause unexpected behavior.
Notes
-----
- The fiducial point for the spherical projection can be set to 'center'
to use the mean position of input sky coordinates, or as an
`~astropy.coordinates.SkyCoord` object.
- Units in all output WCS objects will always be in degrees.
    - If the coordinate frame differs between `~astropy.coordinates.SkyCoord`
      objects passed in for ``world_coords`` and ``proj_point``, the frame of
      ``world_coords`` will be used as the frame for the output WCS.
- If a WCS object is passed in to ``projection`` the CD/PC matrix will
be used as an initial guess for the fit. If this is known to be
significantly off and may throw off the fit, set to the identity matrix
      (for example, by setting ``wcs.wcs.pc = [(1., 0.), (0., 1.)]``).
Parameters
----------
xy : (`numpy.ndarray`, `numpy.ndarray`) tuple
x & y pixel coordinates.
world_coords : `~astropy.coordinates.SkyCoord`
        `~astropy.coordinates.SkyCoord` object with world coordinates.
    proj_point : 'center' or `~astropy.coordinates.SkyCoord`
        Defaults to 'center', in which case the geometric center of the input
        world coordinates will be used as the projection point. To specify an
        exact point for the projection, a `~astropy.coordinates.SkyCoord` object
        with a single coordinate pair can be passed in. For consistency, the
        units and frame of these coordinates will be transformed to match
        ``world_coords`` if they don't.
projection : str or `~astropy.wcs.WCS`
        Three-letter projection code of any of the standard projections defined
        in the FITS WCS standard. Optionally, a WCS object with projection
keywords set may be passed in.
sip_degree : None or int
If set to a non-zero integer value, will fit SIP of degree
``sip_degree`` to model geometric distortion. Defaults to None, meaning
no distortion corrections will be fit.
Returns
-------
wcs : `~astropy.wcs.WCS`
The best-fit WCS to the points given.
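    Examples
    --------
    A minimal sketch with a handful of invented matched positions::
        import numpy as np
        from astropy.coordinates import SkyCoord
        from astropy.wcs.utils import fit_wcs_from_points
        xp = np.array([10.0, 200.0, 350.0, 420.0])
        yp = np.array([15.0, 180.0, 370.0, 450.0])
        sky = SkyCoord(ra=[150.01, 150.05, 150.08, 150.10],
                       dec=[2.50, 2.53, 2.57, 2.60], unit="deg")
        # fit a plain TAN projection with no SIP distortion
        wcs = fit_wcs_from_points((xp, yp), sky, proj_point="center",
                                  projection="TAN", sip_degree=None)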
"""
from scipy.optimize import least_squares
import astropy.units as u
from astropy.coordinates import SkyCoord # here to avoid circular import
from .wcs import Sip
xp, yp = xy
try:
lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
except AttributeError:
unit_sph = world_coords.unit_spherical
lon, lat = unit_sph.lon.deg, unit_sph.lat.deg
# verify input
if (type(proj_point) != type(world_coords)) and (proj_point != "center"):
        raise ValueError(
            "proj_point must be set to 'center', or an "
            "`~astropy.coordinates.SkyCoord` object with "
            "a pair of points."
        )
use_center_as_proj_point = str(proj_point) == "center"
if not use_center_as_proj_point:
assert proj_point.size == 1
proj_codes = [
"AZP",
"SZP",
"TAN",
"STG",
"SIN",
"ARC",
"ZEA",
"AIR",
"CYP",
"CEA",
"CAR",
"MER",
"SFL",
"PAR",
"MOL",
"AIT",
"COP",
"COE",
"COD",
"COO",
"BON",
"PCO",
"TSC",
"CSC",
"QSC",
"HPX",
"XPH",
]
if type(projection) == str:
if projection not in proj_codes:
            raise ValueError(
                "Must specify valid projection code from list of supported "
                f"types: {', '.join(proj_codes)}"
            )
# empty wcs to fill in with fit values
wcs = celestial_frame_to_wcs(frame=world_coords.frame, projection=projection)
else: # if projection is not string, should be wcs object. use as template.
wcs = copy.deepcopy(projection)
wcs.cdelt = (1.0, 1.0) # make sure cdelt is 1
wcs.sip = None
# Change PC to CD, since cdelt will be set to 1
if wcs.wcs.has_pc():
wcs.wcs.cd = wcs.wcs.pc
wcs.wcs.__delattr__("pc")
if (type(sip_degree) != type(None)) and (type(sip_degree) != int):
raise ValueError("sip_degree must be None, or integer.")
# compute bounding box for sources in image coordinates:
xpmin, xpmax, ypmin, ypmax = xp.min(), xp.max(), yp.min(), yp.max()
# set pixel_shape to span of input points
wcs.pixel_shape = (
1 if xpmax <= 0.0 else int(np.ceil(xpmax)),
1 if ypmax <= 0.0 else int(np.ceil(ypmax)),
)
# determine CRVAL from input
close = lambda l, p: p[np.argmin(np.abs(l))]
if use_center_as_proj_point: # use center of input points
sc1 = SkyCoord(lon.min() * u.deg, lat.max() * u.deg)
sc2 = SkyCoord(lon.max() * u.deg, lat.min() * u.deg)
pa = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
midpoint_sc = sc1.directional_offset_by(pa, sep / 2)
wcs.wcs.crval = (midpoint_sc.data.lon.deg, midpoint_sc.data.lat.deg)
wcs.wcs.crpix = ((xpmax + xpmin) / 2.0, (ypmax + ypmin) / 2.0)
else: # convert units, initial guess for crpix
        proj_point = proj_point.transform_to(world_coords)
wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
wcs.wcs.crpix = (
close(lon - wcs.wcs.crval[0], xp + 1),
            close(lat - wcs.wcs.crval[1], yp + 1),
)
# fit linear terms, assign to wcs
# use (1, 0, 0, 1) as initial guess, in case input wcs was passed in
# and cd terms are way off.
# Use bounds to require that the fit center pixel is on the input image
if xpmin == xpmax:
xpmin, xpmax = xpmin - 0.5, xpmax + 0.5
if ypmin == ypmax:
ypmin, ypmax = ypmin - 0.5, ypmax + 0.5
p0 = np.concatenate([wcs.wcs.cd.flatten(), wcs.wcs.crpix.flatten()])
fit = least_squares(
_linear_wcs_fit,
p0,
args=(lon, lat, xp, yp, wcs),
bounds=[
[-np.inf, -np.inf, -np.inf, -np.inf, xpmin + 1, ypmin + 1],
[np.inf, np.inf, np.inf, np.inf, xpmax + 1, ypmax + 1],
],
)
wcs.wcs.crpix = np.array(fit.x[4:6])
wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))
# fit SIP, if specified. Only fit forward coefficients
if sip_degree:
degree = sip_degree
if "-SIP" not in wcs.wcs.ctype[0]:
wcs.wcs.ctype = [x + "-SIP" for x in wcs.wcs.ctype]
coef_names = [
f"{i}_{j}"
for i in range(degree + 1)
for j in range(degree + 1)
if (i + j) < (degree + 1) and (i + j) > 1
]
p0 = np.concatenate(
(
np.array(wcs.wcs.crpix),
wcs.wcs.cd.flatten(),
np.zeros(2 * len(coef_names)),
)
)
fit = least_squares(
_sip_fit,
p0,
args=(lon, lat, xp, yp, wcs, degree, coef_names),
bounds=[
[xpmin + 1, ypmin + 1] + [-np.inf] * (4 + 2 * len(coef_names)),
[xpmax + 1, ypmax + 1] + [np.inf] * (4 + 2 * len(coef_names)),
],
)
coef_fit = (
list(fit.x[6 : 6 + len(coef_names)]),
list(fit.x[6 + len(coef_names) :]),
)
# put fit values in wcs
wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
wcs.wcs.crpix = fit.x[0:2]
a_vals = np.zeros((degree + 1, degree + 1))
b_vals = np.zeros((degree + 1, degree + 1))
for coef_name in coef_names:
a_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[0].pop(0)
b_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[1].pop(0)
wcs.sip = Sip(
a_vals,
b_vals,
np.zeros((degree + 1, degree + 1)),
np.zeros((degree + 1, degree + 1)),
wcs.wcs.crpix,
)
return wcs
def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an ITRS coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
        The time associated with the coordinate; it will be passed to
        `~astropy.coordinates.ITRS` as the ``obstime`` keyword.
Returns
-------
~astropy.coordinates.ITRS
An `~astropy.coordinates.ITRS` coordinate frame
representing the coordinates.
Notes
-----
    The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
    where the first three elements are the coordinate in a cartesian
    representation and the last three are the coordinate in a spherical
    representation.
    This function prioritizes reading the cartesian coordinates, and will only
    read the spherical coordinates if the cartesian coordinates are either all
    zero or contain any non-finite values.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
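    Examples
    --------
    A minimal sketch (the cartesian observer location below is invented)::
        import numpy as np
        from astropy.time import Time
        from astropy.wcs.utils import obsgeo_to_frame
        obsgeo = np.array([1813303.0, -5327724.0, 3004157.0, 0.0, 0.0, 0.0])
        itrs = obsgeo_to_frame(obsgeo, Time("2020-01-01T00:00:00"))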
"""
if (
obsgeo is None
or len(obsgeo) != 6
or np.all(np.array(obsgeo) == 0)
or np.all(~np.isfinite(obsgeo))
):
raise ValueError(
f"Can not parse the 'obsgeo' location ({obsgeo}). "
"obsgeo should be a length 6 non-zero, finite numpy array"
)
# If the cartesian coords are zero or have NaNs in them use the spherical ones
if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
# Otherwise we assume the cartesian ones are valid
else:
data = CartesianRepresentation(*obsgeo[:3] * u.m)
return ITRS(data, obstime=obstime)
c51950b15016434dfcf1fbfec154046f47512b9eef17090e4792da5592b470dc
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# It gets to be really tedious to type long docstrings in ANSI C
# syntax (since multi-line string literals are not valid).
# Therefore, the docstrings are written here in doc/docstrings.py,
# which are then converted by setup.py into docstrings.h, which is
# included by pywcs.c
__all__ = ["TWO_OR_MORE_ARGS", "RETURNS", "ORIGIN", "RA_DEC_ORDER"]
def _fix(content, indent=0):
lines = content.split("\n")
indent = "\n" + " " * indent
return indent.join(lines)
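# A rough illustration of how the helpers below are used (the indent value is
# arbitrary): ``TWO_OR_MORE_ARGS(2, indent=8)`` returns the shared ``*args``
# description with every line after the first prefixed by eight spaces, so the
# text lines up when it is interpolated into a larger docstring.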
def TWO_OR_MORE_ARGS(naxis, indent=0):
return _fix(
f"""*args
There are two accepted forms for the positional arguments:
- 2 arguments: An *N* x *{naxis}* array of coordinates, and an
*origin*.
- more than 2 arguments: An array for each axis, followed by
an *origin*. These arrays must be broadcastable to one
another.
Here, *origin* is the coordinate in the upper left corner of the
image. In FITS and Fortran standards, this is 1. In Numpy and C
standards this is 0.
""",
indent,
)
def RETURNS(out_type, indent=0):
return _fix(
f"""result : array
Returns the {out_type}. If the input was a single array and
origin, a single array is returned, otherwise a tuple of arrays is
returned.""",
indent,
)
def ORIGIN(indent=0):
return _fix(
"""
origin : int
Specifies the origin of pixel values. The Fortran and FITS
standards use an origin of 1. Numpy and C use array indexing with
origin at 0.
""",
indent,
)
def RA_DEC_ORDER(indent=0):
return _fix(
"""
ra_dec_order : bool, optional
        When `True` will ensure that world coordinates are always given
        and returned as (*ra*, *dec*) pairs, regardless of the order of
        the axes specified by the ``CTYPE`` keywords. Default is
`False`.
""",
indent,
)
a = """
``double array[a_order+1][a_order+1]`` Focal plane transformation
matrix.
The `SIP`_ ``A_i_j`` matrix used for pixel to focal plane
transformation.
Its values may be changed in place, but it may not be resized, without
creating a new `~astropy.wcs.Sip` object.
"""
a_order = """
``int`` (read-only) Order of the polynomial (``A_ORDER``).
"""
all_pix2world = f"""
all_pix2world(pixcrd, origin) -> ``double array[ncoord][nelem]``
Transforms pixel coordinates to world coordinates.
Does the following:
- Detector to image plane correction (if present)
- SIP distortion correction (if present)
- FITS WCS distortion correction (if present)
- wcslib "core" WCS transformation
The first three (the distortion corrections) are done in parallel.
Parameters
----------
pixcrd : ndarray
Array of pixel coordinates as ``double array[ncoord][nelem]``.
{ORIGIN()}
Returns
-------
world : ndarray
Returns an array of world coordinates as ``double array[ncoord][nelem]``.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
alt = """
``str`` Character code for alternate coordinate descriptions.
For example, the ``"a"`` in keyword names such as ``CTYPEia``. This
is a space character for the primary coordinate description, or one of
the 26 upper-case letters, A-Z.
"""
ap = """
``double array[ap_order+1][ap_order+1]`` Focal plane to pixel
transformation matrix.
The `SIP`_ ``AP_i_j`` matrix used for focal plane to pixel
transformation. Its values may be changed in place, but it may not be
resized, without creating a new `~astropy.wcs.Sip` object.
"""
ap_order = """
``int`` (read-only) Order of the polynomial (``AP_ORDER``).
"""
cel = """
`~astropy.wcs.Celprm` Information required to transform celestial coordinates.
"""
Celprm = """
Class that contains information required to transform celestial coordinates.
It consists of certain members that must be set by the user (given) and others
that are set by the WCSLIB routines (returned).
Some of the latter are supplied for informational purposes and others are for
internal use only.
"""
Prjprm = """
Class that contains information needed to project or deproject native spherical coordinates.
It consists of certain members that must be set by the user (given) and others
that are set by the WCSLIB routines (returned).
Some of the latter are supplied for informational purposes and others are for
internal use only.
"""
aux = """
`~astropy.wcs.Auxprm` Auxiliary coordinate system information of a specialist nature.
"""
Auxprm = """
Class that contains auxiliary coordinate system information of a specialist
nature.
This class can not be constructed directly from Python, but instead is
returned from `~astropy.wcs.Wcsprm.aux`.
"""
axis_types = """
``int array[naxis]`` An array of four-digit type codes for each axis.
- First digit (i.e. 1000s):
- 0: Non-specific coordinate type.
- 1: Stokes coordinate.
- 2: Celestial coordinate (including ``CUBEFACE``).
- 3: Spectral coordinate.
- Second digit (i.e. 100s):
- 0: Linear axis.
- 1: Quantized axis (``STOKES``, ``CUBEFACE``).
- 2: Non-linear celestial axis.
- 3: Non-linear spectral axis.
- 4: Logarithmic axis.
- 5: Tabular axis.
- Third digit (i.e. 10s):
- 0: Group number, e.g. lookup table number
- The fourth digit is used as a qualifier depending on the axis type.
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables: the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
have its type set to -1 and generate an error.
"""
b = """
``double array[b_order+1][b_order+1]`` Pixel to focal plane
transformation matrix.
The `SIP`_ ``B_i_j`` matrix used for pixel to focal plane
transformation. Its values may be changed in place, but it may not be
resized, without creating a new `~astropy.wcs.Sip` object.
"""
b_order = """
``int`` (read-only) Order of the polynomial (``B_ORDER``).
"""
bounds_check = """
bounds_check(pix2world, world2pix)
Enable/disable bounds checking.
Parameters
----------
pix2world : bool, optional
When `True`, enable bounds checking for the pixel-to-world (p2x)
transformations. Default is `True`.
world2pix : bool, optional
When `True`, enable bounds checking for the world-to-pixel (s2x)
transformations. Default is `True`.
Notes
-----
Note that by default (without calling `bounds_check`) strict bounds
checking is enabled.
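    Examples
    --------
    A minimal sketch using a freshly constructed WCS::
        from astropy.wcs import WCS
        w = WCS(naxis=2)
        # disable bounds checking for both directions of the transformation
        w.wcs.bounds_check(False, False)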
"""
bp = """
``double array[bp_order+1][bp_order+1]`` Focal plane to pixel
transformation matrix.
The `SIP`_ ``BP_i_j`` matrix used for focal plane to pixel
transformation. Its values may be changed in place, but it may not be
resized, without creating a new `~astropy.wcs.Sip` object.
"""
bp_order = """
``int`` (read-only) Order of the polynomial (``BP_ORDER``).
"""
cd = """
``double array[naxis][naxis]`` The ``CDi_ja`` linear transformation
matrix.
For historical compatibility, three alternate specifications of the
linear transformations are available in wcslib. The canonical
``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated
``CROTAia`` keywords. Although the latter may not formally co-exist
with ``PCi_ja``, the approach here is simply to ignore them if given
in conjunction with ``PCi_ja``.
`~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and
`~astropy.wcs.Wcsprm.has_crota` can be used to determine which of
these alternatives are present in the header.
These alternate specifications of the linear transformation matrix are
translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and
are nowhere visible to the lower-level routines. In particular,
`~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity
if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is
associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts
to a unity ``PCi_ja`` matrix.
"""
cdelt = """
``double array[naxis]`` Coordinate increments (``CDELTia``) for each
coord axis.
If a ``CDi_ja`` linear transformation matrix is present, a warning is
raised and `~astropy.wcs.Wcsprm.cdelt` is ignored. The ``CDi_ja``
matrix may be deleted by::
del wcs.wcs.cd
An undefined value is represented by NaN.
"""
cdfix = """
cdfix()
Fix erroneously omitted ``CDi_ja`` keywords.
Sets the diagonal element of the ``CDi_ja`` matrix to unity if all
``CDi_ja`` keywords associated with a given axis were omitted.
According to Paper I, if any ``CDi_ja`` keywords at all are given in a
FITS header then those not given default to zero. This results in a
singular matrix with an intersecting row and column of zeros.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
cel_offset = """
``boolean`` Is there an offset?
If `True`, an offset will be applied to ``(x, y)`` to force ``(x, y) =
(0, 0)`` at the fiducial point, (phi_0, theta_0). Default is `False`.
"""
celprm_phi0 = r"""
`float`, `None`. The native longitude, :math:`\phi_0`, in degrees of the
fiducial point, i.e., the point whose celestial coordinates are given in
    ``Celprm.ref[0:1]``. If `None` or ``nan``, the initialization routine,
``celset()``, will set this to a projection-specific default.
"""
celprm_theta0 = r"""
`float`, `None`. The native latitude, :math:`\theta_0`, in degrees of the
fiducial point, i.e. the point whose celestial coordinates are given in
    ``Celprm.ref[0:1]``. If `None` or ``nan``, the initialization routine,
``celset()``, will set this to a projection-specific default.
"""
celprm_ref = """
``numpy.ndarray`` with 4 elements.
(Given) The first pair of values should be set to the celestial longitude and
latitude of the fiducial point in degrees - typically right ascension and
declination. These are given by the ``CRVALia`` keywords in ``FITS``.
(Given and returned) The second pair of values are the native longitude,
``phi_p`` (in degrees), and latitude, ``theta_p`` (in degrees), of the
celestial pole (the latter is the same as the celestial latitude of the
native pole, ``delta_p``) and these are given by the ``FITS`` keywords
``LONPOLEa`` and ``LATPOLEa`` (or by ``PVi_2a`` and ``PVi_3a`` attached
to the longitude axis which take precedence if defined).
``LONPOLEa`` defaults to ``phi0`` if the celestial latitude of the fiducial
point of the projection is greater than or equal to the native latitude,
otherwise ``phi0 + 180`` (degrees). (This is the condition for the celestial
latitude to increase in the same direction as the native latitude at the
fiducial point.) ``ref[2]`` may be set to `None` or ``numpy.nan``
or 999.0 to indicate that the correct default should be substituted.
``theta_p``, the native latitude of the celestial pole (or equally the
celestial latitude of the native pole, ``delta_p``) is often determined
uniquely by ``CRVALia`` and ``LONPOLEa`` in which case ``LATPOLEa`` is ignored.
However, in some circumstances there are two valid solutions for ``theta_p``
and ``LATPOLEa`` is used to choose between them. ``LATPOLEa`` is set in
``ref[3]`` and the solution closest to this value is used to reset ``ref[3]``.
It is therefore legitimate, for example, to set ``ref[3]`` to ``+90.0``
to choose the more northerly solution - the default if the ``LATPOLEa`` keyword
is omitted from the ``FITS`` header. For the special case where the fiducial
point of the projection is at native latitude zero, its celestial latitude
is zero, and ``LONPOLEa`` = ``+/- 90.0`` then the celestial latitude of the
native pole is not determined by the first three reference values and
``LATPOLEa`` specifies it completely.
The returned value, celprm.latpreq, specifies how ``LATPOLEa``
was actually used."""
celprm_euler = """
*Read-only* ``numpy.ndarray`` with 5 elements. Euler angles and associated
intermediaries derived from the coordinate reference values. The first three
values are the ``Z-``, ``X-``, and ``Z``-Euler angles in degrees, and the
remaining two are the cosine and sine of the ``X``-Euler angle.
"""
celprm_latpreq = """
``int``, *read-only*. For informational purposes, this indicates how the
``LATPOLEa`` keyword was used:
- 0: Not required, ``theta_p == delta_p`` was determined uniquely by the
``CRVALia`` and ``LONPOLEa`` keywords.
- 1: Required to select between two valid solutions of ``theta_p``.
- 2: ``theta_p`` was specified solely by ``LATPOLEa``.
"""
celprm_isolat = """
``bool``, *read-only*. True if the spherical rotation preserves the magnitude
of the latitude, which occurs if the axes of the native and celestial
coordinates are coincident. It signals an opportunity to cache intermediate
calculations common to all elements in a vector computation.
"""
celprm_prj = """
*Read-only* Celestial transformation parameters. Some members of `Prjprm`
are read-write, i.e., can be set by the user. For more details, see
documentation for `Prjprm`.
"""
prjprm_r0 = r"""
The radius of the generating sphere for the projection, a linear scaling
parameter. If this is zero, it will be reset to its default value of
:math:`180^\circ/\pi` (the value for FITS WCS).
"""
prjprm_code = """
Three-letter projection code defined by the FITS standard.
"""
prjprm_pv = """
Projection parameters. These correspond to the ``PVi_ma`` keywords in FITS,
so ``pv[0]`` is ``PVi_0a``, ``pv[1]`` is ``PVi_1a``, etc., where ``i`` denotes
the latitude-like axis. Many projections use ``pv[1]`` (``PVi_1a``),
some also use ``pv[2]`` (``PVi_2a``) and ``SZP`` uses ``pv[3]`` (``PVi_3a``).
``ZPN`` is currently the only projection that uses any of the others.
When setting ``pv`` values using lists or ``numpy.ndarray``,
elements set to `None` will be left unchanged while those set to ``numpy.nan``
will be set to ``WCSLIB``'s ``UNDEFINED`` special value. For efficiency
    purposes, if the supplied list or ``numpy.ndarray`` is shorter than the
    length of the ``pv`` member, then the remaining values in ``pv`` will be
    left unchanged.
.. note::
When retrieving ``pv``, a copy of the ``prjprm.pv`` array is returned.
        Modifying the values of this array will not modify the underlying
        ``WCSLIB`` ``prjprm.pv`` data.
"""
prjprm_pvi = """
Set/Get projection parameters for specific index. These correspond to the
``PVi_ma`` keywords in FITS, so ``pv[0]`` is ``PVi_0a``, ``pv[1]`` is
``PVi_1a``, etc., where ``i`` denotes the latitude-like axis.
Many projections use ``pv[1]`` (``PVi_1a``),
some also use ``pv[2]`` (``PVi_2a``) and ``SZP`` uses ``pv[3]`` (``PVi_3a``).
``ZPN`` is currently the only projection that uses any of the others.
Setting a ``pvi`` value to `None` will reset the corresponding ``WCSLIB``'s
``prjprm.pv`` element to the default value as set by ``WCSLIB``'s ``prjini()``.
Setting a ``pvi`` value to ``numpy.nan`` will set the corresponding
``WCSLIB``'s ``prjprm.pv`` element to ``WCSLIB``'s ``UNDEFINED`` special value.
"""
prjprm_phi0 = r"""
The native longitude, :math:`\phi_0` (in degrees) of the reference point,
i.e. the point ``(x,y) = (0,0)``. If undefined the initialization routine
will set this to a projection-specific default.
"""
prjprm_theta0 = r"""
the native latitude, :math:`\theta_0` (in degrees) of the reference point,
i.e. the point ``(x,y) = (0,0)``. If undefined the initialization routine
will set this to a projection-specific default.
"""
prjprm_bounds = """
Controls bounds checking. If ``bounds&1`` then enable strict bounds checking
for the spherical-to-Cartesian (``s2x``) transformation for the
``AZP``, ``SZP``, ``TAN``, ``SIN``, ``ZPN``, and ``COP`` projections.
If ``bounds&2`` then enable strict bounds checking for the
Cartesian-to-spherical transformation (``x2s``) for the ``HPX`` and ``XPH``
    projections. If ``bounds&4`` then the Cartesian-to-spherical transformations
    (``x2s``) will invoke WCSLIB's ``prjbchk()`` to perform bounds checking on the
    computed native coordinates, with a tolerance set to suit each projection.
    ``bounds`` is set to 7 by default during initialization, which enables all checks.
Zero it to disable all checking.
It is not necessary to reset the ``Prjprm`` struct (via ``Prjprm.set()``) when
``bounds`` is changed.
"""
prjprm_name = """
*Read-only.* Long name of the projection.
"""
prjprm_category = """
*Read-only.* Projection category matching the value of the relevant ``wcs``
module constants:
PRJ_ZENITHAL,
PRJ_CYLINDRICAL,
PRJ_PSEUDOCYLINDRICAL,
PRJ_CONVENTIONAL,
PRJ_CONIC,
PRJ_POLYCONIC,
PRJ_QUADCUBE, and
PRJ_HEALPIX.
"""
prjprm_w = """
*Read-only.* Intermediate floating-point values derived from the projection
parameters, cached here to save recomputation.
.. note::
When retrieving ``w``, a copy of the ``prjprm.w`` array is returned.
        Modifying the values of this array will not modify the underlying
        ``WCSLIB`` ``prjprm.w`` data.
"""
prjprm_pvrange = """
*Read-only.* Range of projection parameter indices: 100 times the first allowed
index plus the number of parameters, e.g. ``TAN`` is 0 (no parameters),
``SZP`` is 103 (1 to 3), and ``ZPN`` is 30 (0 to 29).
"""
prjprm_simplezen = """
*Read-only.* True if the projection is a radially-symmetric zenithal projection.
"""
prjprm_equiareal = """
*Read-only.* True if the projection is equal area.
"""
prjprm_conformal = """
*Read-only.* True if the projection is conformal.
"""
prjprm_global_projection = """
*Read-only.* True if the projection can represent the whole sphere in a finite,
non-overlapped mapping.
"""
prjprm_divergent = """
*Read-only.* True if the projection diverges in latitude.
"""
prjprm_x0 = r"""
*Read-only.* The offset in ``x`` used to force :math:`(x,y) = (0,0)` at
:math:`(\phi_0, \theta_0)`.
"""
prjprm_y0 = r"""
*Read-only.* The offset in ``y`` used to force :math:`(x,y) = (0,0)` at
:math:`(\phi_0, \theta_0)`.
"""
prjprm_m = """
*Read-only.* Intermediate integer value (used only for the ``ZPN`` and ``HPX`` projections).
"""
prjprm_n = """
*Read-only.* Intermediate integer value (used only for the ``ZPN`` and ``HPX`` projections).
"""
prjprm_set = """
This method sets up a ``Prjprm`` object according to information supplied
within it.
Note that this routine need not be called directly; it will be invoked by
`prjx2s` and `prjs2x` if ``Prjprm.flag`` is anything other than a predefined
magic value.
The one important property of ``set()`` is that the projection code must be
defined in the ``Prjprm`` in order for ``set()`` to identify the required
projection.
Raises
------
MemoryError
Null ``prjprm`` pointer passed to WCSLIB routines.
InvalidPrjParametersError
Invalid projection parameters.
InvalidCoordinateError
One or more of the ``(x,y)`` or ``(lon,lat)`` coordinates were invalid.
"""
prjprm_prjx2s = r"""
Deproject Cartesian ``(x,y)`` coordinates in the plane of projection to native
spherical coordinates :math:`(\phi,\theta)`.
The projection is that specified by ``Prjprm.code``.
Parameters
----------
x, y : numpy.ndarray
Arrays corresponding to the first (``x``) and second (``y``) projected
coordinates.
Returns
-------
phi, theta : tuple of numpy.ndarray
Longitude and latitude :math:`(\phi,\theta)` of the projected point in
native spherical coordinates (in degrees). Values corresponding to
invalid ``(x,y)`` coordinates are set to ``numpy.nan``.
Raises
------
MemoryError
Null ``prjprm`` pointer passed to WCSLIB routines.
InvalidPrjParametersError
Invalid projection parameters.
"""
prjprm_prjs2x = r"""
Project native spherical coordinates :math:`(\phi,\theta)` to Cartesian
``(x,y)`` coordinates in the plane of projection.
The projection is that specified by ``Prjprm.code``.
Parameters
----------
phi : numpy.ndarray
Array corresponding to the longitude :math:`\phi` of the projected point
in native spherical coordinates (in degrees).
theta : numpy.ndarray
        Array corresponding to the latitude :math:`\theta` of the projected point
in native spherical coordinates (in degrees). Values corresponding to
invalid :math:`(\phi, \theta)` coordinates are set to ``numpy.nan``.
Returns
-------
x, y : tuple of numpy.ndarray
Projected coordinates.
Raises
------
MemoryError
Null ``prjprm`` pointer passed to WCSLIB routines.
InvalidPrjParametersError
Invalid projection parameters.
"""
celfix = """
Translates AIPS-convention celestial projection types, ``-NCP`` and
``-GLS``.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
cname = """
``list of strings`` A list of the coordinate axis names, from
``CNAMEia``.
"""
colax = """
``int array[naxis]`` An array recording the column numbers for each
axis in a pixel list.
"""
colnum = """
``int`` Column of FITS binary table associated with this WCS.
Where the coordinate representation is associated with an image-array
column in a FITS binary table, this property may be used to record the
relevant column number.
It should be set to zero for an image header or pixel list.
"""
compare = """
compare(other, cmp=0, tolerance=0.0)
Compare two Wcsprm objects for equality.
Parameters
----------
other : Wcsprm
The other Wcsprm object to compare to.
cmp : int, optional
A bit field controlling the strictness of the comparison. When 0,
(the default), all fields must be identical.
The following constants, defined in the `astropy.wcs` module,
may be or'ed together to loosen the comparison.
- ``WCSCOMPARE_ANCILLARY``: Ignores ancillary keywords that don't
change the WCS transformation, such as ``XPOSURE`` or
``EQUINOX``. Note that this also ignores ``DATE-OBS``, which does
change the WCS transformation in some cases.
- ``WCSCOMPARE_TILING``: Ignore integral differences in
``CRPIXja``. This is the 'tiling' condition, where two WCSes
cover different regions of the same map projection and align on
the same map grid.
- ``WCSCOMPARE_CRPIX``: Ignore any differences at all in
``CRPIXja``. The two WCSes cover different regions of the same
map projection but may not align on the same grid map.
Overrides ``WCSCOMPARE_TILING``.
tolerance : float, optional
The amount of tolerance required. For example, for a value of
1e-6, all floating-point values in the objects must be equal to
the first 6 decimal places. The default value of 0.0 implies
exact equality.
Returns
-------
equal : bool
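    Examples
    --------
    A minimal sketch comparing two freshly constructed WCS objects::
        from astropy.wcs import WCS, WCSCOMPARE_ANCILLARY
        w1 = WCS(naxis=2)
        w2 = WCS(naxis=2)
        # True if the two differ at most in ancillary keywords
        w1.wcs.compare(w2.wcs, cmp=WCSCOMPARE_ANCILLARY, tolerance=1e-6)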
"""
convert = """
convert(array)
Perform the unit conversion on the elements of the given *array*,
returning an array of the same shape.
"""
coord = """
``double array[K_M]...[K_2][K_1][M]`` The tabular coordinate array.
Has the dimensions::
(K_M, ... K_2, K_1, M)
(see `~astropy.wcs.Tabprm.K`) i.e. with the `M` dimension
varying fastest so that the `M` elements of a coordinate vector are
stored contiguously in memory.
"""
copy = """
Creates a deep copy of the WCS object.
"""
cpdis1 = """
`~astropy.wcs.DistortionLookupTable`
The pre-linear transformation distortion lookup table, ``CPDIS1``.
"""
cpdis2 = """
`~astropy.wcs.DistortionLookupTable`
The pre-linear transformation distortion lookup table, ``CPDIS2``.
"""
crder = """
``double array[naxis]`` The random error in each coordinate axis,
``CRDERia``.
An undefined value is represented by NaN.
"""
crln_obs = """
``double`` Carrington heliographic longitude of the observer (deg). If
undefined, this is set to `None`.
"""
crota = """
``double array[naxis]`` ``CROTAia`` keyvalues for each coordinate
axis.
For historical compatibility, three alternate specifications of the
linear transformations are available in wcslib. The canonical
``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated
``CROTAia`` keywords. Although the latter may not formally co-exist
with ``PCi_ja``, the approach here is simply to ignore them if given
in conjunction with ``PCi_ja``.
`~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and
`~astropy.wcs.Wcsprm.has_crota` can be used to determine which of
these alternatives are present in the header.
These alternate specifications of the linear transformation matrix are
translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and
are nowhere visible to the lower-level routines. In particular,
`~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity
if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is
associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts
to a unity ``PCi_ja`` matrix.
"""
crpix = """
``double array[naxis]`` Coordinate reference pixels (``CRPIXja``) for
each pixel axis.
"""
crval = """
``double array[naxis]`` Coordinate reference values (``CRVALia``) for
each coordinate axis.
"""
crval_tabprm = """
``double array[M]`` Index values for the reference pixel for each of
the tabular coord axes.
"""
csyer = """
``double array[naxis]`` The systematic error in the coordinate value
axes, ``CSYERia``.
An undefined value is represented by NaN.
"""
ctype = """
``list of strings[naxis]`` List of ``CTYPEia`` keyvalues.
The `~astropy.wcs.Wcsprm.ctype` keyword values must be in upper case
and there must be zero or one pair of matched celestial axis types,
and zero or one spectral axis.
"""
cubeface = """
``int`` Index into the ``pixcrd`` (pixel coordinate) array for the
``CUBEFACE`` axis.
This is used for quadcube projections where the cube faces are stored
on a separate axis.
The quadcube projections (``TSC``, ``CSC``, ``QSC``) may be
represented in FITS in either of two ways:
- The six faces may be laid out in one plane and numbered as
follows::
              0
        4 3 2 1 4 3 2
              5
Faces 2, 3 and 4 may appear on one side or the other (or both).
The world-to-pixel routines map faces 2, 3 and 4 to the left but
the pixel-to-world routines accept them on either side.
- The ``COBE`` convention in which the six faces are stored in a
three-dimensional structure using a ``CUBEFACE`` axis indexed
from 0 to 5 as above.
These routines support both methods; `~astropy.wcs.Wcsprm.set`
determines which is being used by the presence or absence of a
``CUBEFACE`` axis in `~astropy.wcs.Wcsprm.ctype`.
`~astropy.wcs.Wcsprm.p2s` and `~astropy.wcs.Wcsprm.s2p` translate the
``CUBEFACE`` axis representation to the single plane representation
understood by the lower-level projection routines.
"""
cunit = """
``list of astropy.UnitBase[naxis]`` List of ``CUNITia`` keyvalues as
`astropy.units.UnitBase` instances.
These define the units of measurement of the ``CRVALia``, ``CDELTia``
and ``CDi_ja`` keywords.
As ``CUNITia`` is an optional header keyword,
`~astropy.wcs.Wcsprm.cunit` may be left blank but otherwise is
expected to contain a standard units specification as defined by WCS
Paper I. `~astropy.wcs.Wcsprm.unitfix` is available to translate
commonly used non-standard units specifications but this must be done
as a separate step before invoking `~astropy.wcs.Wcsprm.set`.
For celestial axes, if `~astropy.wcs.Wcsprm.cunit` is not blank,
`~astropy.wcs.Wcsprm.set` uses ``wcsunits`` to parse it and scale
`~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and
`~astropy.wcs.Wcsprm.cd` to decimal degrees. It then resets
`~astropy.wcs.Wcsprm.cunit` to ``"deg"``.
For spectral axes, if `~astropy.wcs.Wcsprm.cunit` is not blank,
`~astropy.wcs.Wcsprm.set` uses ``wcsunits`` to parse it and scale
`~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and
`~astropy.wcs.Wcsprm.cd` to SI units. It then resets
`~astropy.wcs.Wcsprm.cunit` accordingly.
`~astropy.wcs.Wcsprm.set` ignores `~astropy.wcs.Wcsprm.cunit` for
other coordinate types; `~astropy.wcs.Wcsprm.cunit` may be used to
label coordinate values.
"""
cylfix = """
cylfix()
Fixes WCS keyvalues for malformed cylindrical projections.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
data = """
``float array`` The array data for the
`~astropy.wcs.DistortionLookupTable`.
"""
data_wtbarr = """
``double array``
The array data for the BINTABLE.
"""
dateavg = """
``string`` Representative mid-point of the date of observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.dateobs
"""
dateobs = """
``string`` Start of the date of observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.dateavg
"""
datfix = """
datfix()
Translates the old ``DATE-OBS`` date format to year-2000 standard form
``(yyyy-mm-ddThh:mm:ss)`` and derives ``MJD-OBS`` from it if not
already set.
Alternatively, if `~astropy.wcs.Wcsprm.mjdobs` is set and
`~astropy.wcs.Wcsprm.dateobs` isn't, then `~astropy.wcs.Wcsprm.datfix`
derives `~astropy.wcs.Wcsprm.dateobs` from it. If both are set but
disagree by more than half a day then `ValueError` is raised.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
delta = """
``double array[M]`` (read-only) Interpolated indices into the coord
array.
Array of interpolated indices into the coordinate array such that
Upsilon_m, as defined in Paper III, is equal to
(`~astropy.wcs.Tabprm.p0` [m] + 1) + delta[m].
"""
det2im = """
Convert detector coordinates to image plane coordinates.
"""
det2im1 = """
A `~astropy.wcs.DistortionLookupTable` object for detector to image plane
correction in the *x*-axis.
"""
det2im2 = """
A `~astropy.wcs.DistortionLookupTable` object for detector to image plane
correction in the *y*-axis.
"""
dims = """
``int array[ndim]`` (read-only)
The dimensions of the tabular array
`~astropy.wcs.Wtbarr.data`.
"""
DistortionLookupTable = """
DistortionLookupTable(*table*, *crpix*, *crval*, *cdelt*)
Represents a single lookup table for a `distortion paper`_
transformation.
Parameters
----------
table : 2-dimensional array
The distortion lookup table.
crpix : 2-tuple
The distortion array reference pixel
crval : 2-tuple
The image array pixel coordinate
cdelt : 2-tuple
The grid step size
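    Examples
    --------
    A minimal sketch with a tiny, all-zero lookup table (the lookup tables
    are 2-dimensional ``float32`` arrays of offsets)::
        import numpy as np
        from astropy.wcs import DistortionLookupTable
        table = np.zeros((2, 2), dtype=np.float32)
        lut = DistortionLookupTable(table, (1.0, 1.0), (1.0, 1.0), (64.0, 64.0))
        lut.get_offset(10.0, 20.0)  # offset interpolated from the table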
"""
dsun_obs = """
``double`` Distance between the centre of the Sun and the observer (m). If
undefined, this is set to `None`.
"""
equinox = """
``double`` The equinox associated with dynamical equatorial or
ecliptic coordinate systems.
``EQUINOXa`` (or ``EPOCH`` in older headers). Not applicable to ICRS
equatorial or ecliptic coordinates.
An undefined value is represented by NaN.
"""
extlev = """
``int`` (read-only) ``EXTLEV`` identifying the binary table extension.
"""
extnam = """
``str`` (read-only) ``EXTNAME`` identifying the binary table extension.
"""
extrema = """
``double array[K_M]...[K_2][2][M]`` (read-only)
An array recording the minimum and maximum value of each element of
the coordinate vector in each row of the coordinate array, with the
dimensions::
(K_M, ... K_2, 2, M)
(see `~astropy.wcs.Tabprm.K`). The minimum is recorded
in the first element of the compressed K_1 dimension, then the
maximum. This array is used by the inverse table lookup function to
speed up table searches.
"""
extver = """
``int`` (read-only) ``EXTVER`` identifying the binary table extension.
"""
find_all_wcs = """
find_all_wcs(relax=0, keysel=0)
Find all WCS transformations in the header.
Parameters
----------
header : str
The raw FITS header data.
relax : bool or int
Degree of permissiveness:
- `False`: Recognize only FITS keywords defined by the published
WCS standard.
- `True`: Admit all recognized informal extensions of the WCS
standard.
- `int`: a bit field selecting specific extensions to accept. See
:ref:`astropy:relaxread` for details.
keysel : sequence of flags
Used to restrict the keyword types considered:
- ``WCSHDR_IMGHEAD``: Image header keywords.
- ``WCSHDR_BIMGARR``: Binary table image array.
- ``WCSHDR_PIXLIST``: Pixel list keywords.
If zero, there is no restriction. If -1, `wcspih` is called,
rather than `wcstbh`.
Returns
-------
wcs_list : list of `~astropy.wcs.Wcsprm`
"""
fix = """
fix(translate_units='', naxis=0)
Applies all of the corrections handled separately by
`~astropy.wcs.Wcsprm.datfix`, `~astropy.wcs.Wcsprm.unitfix`,
`~astropy.wcs.Wcsprm.celfix`, `~astropy.wcs.Wcsprm.spcfix`,
`~astropy.wcs.Wcsprm.cylfix` and `~astropy.wcs.Wcsprm.cdfix`.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of non-standard unit
strings to perform. By default, performs all.
Although ``"S"`` is commonly used to represent seconds, its
translation to ``"s"`` is potentially unsafe since the standard
recognizes ``"S"`` formally as Siemens, however rarely that may be
used. The same applies to ``"H"`` for hours (Henry), and ``"D"``
for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to ``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to ``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to ``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'``
does all of them.
naxis : int array, optional
Image axis lengths. If this array is set to zero or ``None``,
then `~astropy.wcs.Wcsprm.cylfix` will not be invoked.
Returns
-------
status : dict
Returns a dictionary containing the following keys, each referring
to a status string for each of the sub-fix functions that were
called:
- `~astropy.wcs.Wcsprm.cdfix`
- `~astropy.wcs.Wcsprm.datfix`
- `~astropy.wcs.Wcsprm.unitfix`
- `~astropy.wcs.Wcsprm.celfix`
- `~astropy.wcs.Wcsprm.spcfix`
- `~astropy.wcs.Wcsprm.cylfix`
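    Examples
    --------
    A minimal sketch using a freshly constructed WCS::
        from astropy.wcs import WCS
        w = WCS(naxis=2)
        # apply all corrections, allowing the unsafe unit translations above
        status = w.wcs.fix(translate_units="shd")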
"""
get_offset = """
get_offset(x, y) -> (x, y)
Returns the offset as defined in the distortion lookup table.
Returns
-------
coordinate : (2,) tuple
The offset from the distortion table for pixel point (*x*, *y*).
"""
get_cdelt = """
get_cdelt() -> numpy.ndarray
Coordinate increments (``CDELTia``) for each coord axis as ``double array[naxis]``.
Returns the ``CDELT`` offsets in read-only form. Unlike the
`~astropy.wcs.Wcsprm.cdelt` property, this works even when the header
specifies the linear transformation matrix in one of the alternative
``CDi_ja`` or ``CROTAia`` forms. This is useful when you want access
to the linear transformation matrix, but don't care how it was
specified in the header.
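    Examples
    --------
    A minimal sketch using a freshly constructed WCS with a ``CDi_ja`` matrix::
        from astropy.wcs import WCS
        w = WCS(naxis=2)
        w.wcs.cd = [[-0.001, 0.0], [0.0, 0.001]]
        # effective per-axis increments, regardless of how the header gave them
        cdelt = w.wcs.get_cdelt()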
"""
get_pc = """
get_pc() -> numpy.ndarray
Returns the ``PC`` matrix in read-only form as ``double array[naxis][naxis]``. Unlike the
`~astropy.wcs.Wcsprm.pc` property, this works even when the header
specifies the linear transformation matrix in one of the alternative
``CDi_ja`` or ``CROTAia`` forms. This is useful when you want access
to the linear transformation matrix, but don't care how it was
specified in the header.
"""
get_ps = """
get_ps() -> list
Returns ``PSi_ma`` keywords for each *i* and *m* as list of tuples.
Returns
-------
ps : list
Returned as a list of tuples of the form (*i*, *m*, *value*):
- *i*: int. Axis number, as in ``PSi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PSi_ma``, (i.e. 0-relative)
- *value*: string. Parameter value.
See also
--------
astropy.wcs.Wcsprm.set_ps : Set ``PSi_ma`` values
"""
get_pv = """
get_pv() -> list
Returns ``PVi_ma`` keywords for each *i* and *m* as list of tuples.
Returns
-------
sequence of tuple
Returned as a list of tuples of the form (*i*, *m*, *value*):
- *i*: int. Axis number, as in ``PVi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PVi_ma``, (i.e. 0-relative)
- *value*: string. Parameter value.
See also
--------
astropy.wcs.Wcsprm.set_pv : Set ``PVi_ma`` values
Notes
-----
Note that, if they were not given, `~astropy.wcs.Wcsprm.set` resets
the entries for ``PVi_1a``, ``PVi_2a``, ``PVi_3a``, and ``PVi_4a`` for
longitude axis *i* to match (``phi_0``, ``theta_0``), the native
longitude and latitude of the reference point given by ``LONPOLEa``
and ``LATPOLEa``.
"""
has_cd = """
has_cd() -> bool
Returns `True` if ``CDi_ja`` is present.
``CDi_ja`` is an alternate specification of the linear transformation
matrix, maintained for historical compatibility.
Matrix elements in the IRAF convention are equivalent to the product
``CDi_ja = CDELTia * PCi_ja``, but the defaults differ from that of
the ``PCi_ja`` matrix. If one or more ``CDi_ja`` keywords are present
then all unspecified ``CDi_ja`` default to zero. If no ``CDi_ja`` (or
``CROTAia``) keywords are present, then the header is assumed to be in
``PCi_ja`` form whether or not any ``PCi_ja`` keywords are present
since this results in an interpretation of ``CDELTia`` consistent with
the original FITS specification.
While ``CDi_ja`` may not formally co-exist with ``PCi_ja``, it may
co-exist with ``CDELTia`` and ``CROTAia`` which are to be ignored.
See also
--------
astropy.wcs.Wcsprm.cd : Get the raw ``CDi_ja`` values.
"""
has_cdi_ja = """
has_cdi_ja() -> bool
Alias for `~astropy.wcs.Wcsprm.has_cd`. Maintained for backward
compatibility.
"""
has_crota = """
has_crota() -> bool
Returns `True` if ``CROTAia`` is present.
``CROTAia`` is an alternate specification of the linear transformation
matrix, maintained for historical compatibility.
In the AIPS convention, ``CROTAia`` may only be associated with the
latitude axis of a celestial axis pair. It specifies a rotation in
the image plane that is applied *after* the ``CDELTia``; any other
``CROTAia`` keywords are ignored.
``CROTAia`` may not formally co-exist with ``PCi_ja``. ``CROTAia`` and
``CDELTia`` may formally co-exist with ``CDi_ja`` but if so are to be
ignored.
See also
--------
astropy.wcs.Wcsprm.crota : Get the raw ``CROTAia`` values
"""
has_crotaia = """
has_crotaia() -> bool
Alias for `~astropy.wcs.Wcsprm.has_crota`. Maintained for backward
compatibility.
"""
has_pc = """
has_pc() -> bool
Returns `True` if ``PCi_ja`` is present. ``PCi_ja`` is the
recommended way to specify the linear transformation matrix.
See also
--------
astropy.wcs.Wcsprm.pc : Get the raw ``PCi_ja`` values
"""
has_pci_ja = """
has_pci_ja() -> bool
Alias for `~astropy.wcs.Wcsprm.has_pc`. Maintained for backward
compatibility.
"""
hgln_obs = """
``double`` Stonyhurst heliographic longitude of the observer. If
undefined, this is set to `None`.
"""
hglt_obs = """
``double`` Heliographic latitude (Carrington or Stonyhurst) of the observer
(deg). If undefined, this is set to `None`.
"""
i = """
``int`` (read-only) Image axis number.
"""
imgpix_matrix = """
``double array[2][2]`` (read-only) Inverse of the ``CDELT`` or ``PC``
matrix.
Inverse containing the product of the ``CDELTia`` diagonal matrix and
the ``PCi_ja`` matrix.
"""
is_unity = """
is_unity() -> bool
Returns `True` if the linear transformation matrix
(`~astropy.wcs.Wcsprm.cd`) is unity.
"""
K = """
``int array[M]`` (read-only) The lengths of the axes of the coordinate
array.
An array of length `M` whose elements record the lengths of the axes of
the coordinate array and of each indexing vector.
"""
kind = """
``str`` (read-only) ``wcstab`` array type.
Character identifying the ``wcstab`` array type:
- ``'c'``: coordinate array,
- ``'i'``: index vector.
"""
lat = """
``int`` (read-only) The index into the world coord array containing
latitude values.
"""
latpole = """
``double`` The native latitude of the celestial pole, ``LATPOLEa`` (deg).
"""
lattyp = """
``string`` (read-only) Celestial axis type for latitude.
For example, "RA", "DEC", "GLON", "GLAT", etc. extracted from "RA--",
"DEC-", "GLON", "GLAT", etc. in the first four characters of
``CTYPEia`` but with trailing dashes removed.
"""
lng = """
``int`` (read-only) The index into the world coord array containing
longitude values.
"""
lngtyp = """
``string`` (read-only) Celestial axis type for longitude.
For example, "RA", "DEC", "GLON", "GLAT", etc. extracted from "RA--",
"DEC-", "GLON", "GLAT", etc. in the first four characters of
``CTYPEia`` but with trailing dashes removed.
"""
lonpole = """
``double`` The native longitude of the celestial pole.
``LONPOLEa`` (deg).
"""
M = """
``int`` (read-only) Number of tabular coordinate axes.
"""
m = """
``int`` (read-only) ``wcstab`` axis number for index vectors.
"""
map = """
``int array[M]`` Association between axes.
A vector of length `~astropy.wcs.Tabprm.M` that defines
the association between axis *m* in the *M*-dimensional coordinate
array (1 <= *m* <= *M*) and the indices of the intermediate world
coordinate and world coordinate arrays.
When the intermediate and world coordinate arrays contain the full
complement of coordinate elements in image-order, as will usually be
the case, then ``map[m-1] == i-1`` for axis *i* in the *N*-dimensional
image (1 <= *i* <= *N*). In terms of the FITS keywords::
map[PVi_3a - 1] == i - 1.
However, a different association may result if the intermediate
coordinates, for example, only contains a (relevant) subset of
intermediate world coordinate elements. For example, if *M* == 1 for
an image with *N* > 1, it is possible to fill the intermediate
coordinates with the relevant coordinate element with ``nelem`` set to
1. In this case ``map[0] = 0`` regardless of the value of *i*.
"""
mix = f"""
mix(mixpix, mixcel, vspan, vstep, viter, world, pixcrd, origin)
Given either the celestial longitude or latitude plus an element of
the pixel coordinate, solves for the remaining elements by iterating
on the unknown celestial coordinate element using
`~astropy.wcs.Wcsprm.s2p`.
Parameters
----------
mixpix : int
Which element on the pixel coordinate is given.
mixcel : int
Which element of the celestial coordinate is given. If *mixcel* =
``1``, celestial longitude is given in ``world[self.lng]``,
latitude returned in ``world[self.lat]``. If *mixcel* = ``2``,
celestial latitude is given in ``world[self.lat]``, longitude
returned in ``world[self.lng]``.
vspan : (float, float)
Solution interval for the celestial coordinate, in degrees. The
ordering of the two limits is irrelevant. Longitude ranges may be
specified with any convenient normalization, for example
``(-120,+120)`` is the same as ``(240,480)``, except that the
solution will be returned with the same normalization, i.e. lie
within the interval specified.
vstep : float
Step size for solution search, in degrees. If ``0``, a sensible,
although perhaps non-optimal default will be used.
viter : int
If a solution is not found then the step size will be halved and
the search recommenced. *viter* controls how many times the step
size is halved. The allowed range is 5 - 10.
world : ndarray
World coordinate elements as ``double array[naxis]``. ``world[self.lng]`` and
``world[self.lat]`` are the celestial longitude and latitude, in
degrees. Which is given and which returned depends on the value
of *mixcel*. All other elements are given. The results will be
written to this array in-place.
pixcrd : ndarray
Pixel coordinates as ``double array[naxis]``. The element indicated by *mixpix* is given and
the remaining elements will be written in-place.
{ORIGIN()}
Returns
-------
result : dict
Returns a dictionary with the following keys:
- *phi* (``double array[naxis]``)
- *theta* (``double array[naxis]``)
- Longitude and latitude in the native coordinate system of
the projection, in degrees.
- *imgcrd* (``double array[naxis]``)
- Image coordinate elements. ``imgcrd[self.lng]`` and
``imgcrd[self.lat]`` are the projected *x*- and
*y*-coordinates, in decimal degrees.
- *world* (``double array[naxis]``)
- Another reference to the *world* argument passed in.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
InvalidCoordinateError
Invalid world coordinate.
NoSolutionError
No solution found in the specified interval.
See also
--------
astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng
Get the axes numbers for latitude and longitude
Notes
-----
Initially, the specified solution interval is checked to see if it's a
\"crossing\" interval. If it isn't, a search is made for a crossing
solution by iterating on the unknown celestial coordinate starting at
the upper limit of the solution interval and decrementing by the
specified step size. A crossing is indicated if the trial value of
the pixel coordinate steps through the value specified. If a crossing
interval is found then the solution is determined by a modified form
of \"regula falsi\" division of the crossing interval. If no crossing
interval was found within the specified solution interval then a
search is made for a \"non-crossing\" solution as may arise from a
point of tangency. The process is complicated by having to make
allowance for the discontinuities that occur in all map projections.
Once one solution has been determined others may be found by
subsequent invocations of `~astropy.wcs.Wcsprm.mix` with suitably
restricted solution intervals.
Note the circumstance that arises when the solution point lies at a
native pole of a projection in which the pole is represented as a
finite curve, for example the zenithals and conics. In such cases two
or more valid solutions may exist but `~astropy.wcs.Wcsprm.mix` only
ever returns one.
Because of its generality, `~astropy.wcs.Wcsprm.mix` is very
compute-intensive. For compute-limited applications, more efficient
special-case solvers could be written for simple projections, for
example non-oblique cylindrical projections.
"""
mjdavg = """
``double`` Modified Julian Date corresponding to ``DATE-AVG``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.mjdobs
"""
mjdobs = """
``double`` Modified Julian Date corresponding to ``DATE-OBS``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.mjdavg
"""
name = """
``string`` The name given to the coordinate representation
``WCSNAMEa``.
"""
naxis = """
``int`` (read-only) The number of axes (pixel and coordinate).
Given by the ``NAXIS`` or ``WCSAXESa`` keyvalues.
The number of coordinate axes is determined at parsing time, and can
not be subsequently changed.
It is determined from the highest of the following:
1. ``NAXIS``
2. ``WCSAXESa``
3. The highest axis number in any parameterized WCS keyword. The
keyvalue, as well as the keyword, must be syntactically valid
otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header only
contains auxiliary WCS keywords for a particular coordinate
representation, then no coordinate description is constructed for it.
This value may differ for different coordinate representations of the
same image.
"""
nc = """
``int`` (read-only) Total number of coord vectors in the coord array.
Total number of coordinate vectors in the coordinate array being the
product K_1 * K_2 * ... * K_M.
"""
ndim = """
``int`` (read-only) Expected dimensionality of the ``wcstab`` array.
"""
obsgeo = """
``double array[3]`` Location of the observer in a standard terrestrial
reference frame.
``OBSGEO-X``, ``OBSGEO-Y``, ``OBSGEO-Z`` (in meters).
An undefined value is represented by NaN.
"""
p0 = """
``int array[M]`` Interpolated indices into the coordinate array.
Vector of length `~astropy.wcs.Tabprm.M` of interpolated
indices into the coordinate array such that Upsilon_m, as defined in
Paper III, is equal to ``(p0[m] + 1) + delta[m]``.
"""
p2s = f"""
p2s(pixcrd, origin)
Converts pixel to world coordinates.
Parameters
----------
pixcrd : ndarray
Array of pixel coordinates as ``double array[ncoord][nelem]``.
{ORIGIN()}
Returns
-------
result : dict
Returns a dictionary with the following keys:
- *imgcrd*: ndarray
- Array of intermediate world coordinates as ``double array[ncoord][nelem]``. For celestial axes,
``imgcrd[][self.lng]`` and ``imgcrd[][self.lat]`` are the
projected *x*-, and *y*-coordinates, in pseudo degrees. For
spectral axes, ``imgcrd[][self.spec]`` is the intermediate
spectral coordinate, in SI units.
- *phi*: ndarray
- Array as ``double array[ncoord]``.
- *theta*: ndarray
- Longitude and latitude in the native coordinate system of the
projection, in degrees, as ``double array[ncoord]``.
- *world*: ndarray
- Array of world coordinates as ``double array[ncoord][nelem]``. For celestial axes,
``world[][self.lng]`` and ``world[][self.lat]`` are the
celestial longitude and latitude, in degrees. For spectral
axes, ``world[][self.spec]`` is the intermediate spectral
coordinate, in SI units.
- *stat*: ndarray
- Status return value for each coordinate as ``int array[ncoord]``. ``0`` for success,
``1+`` for invalid pixel coordinate.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
*x*- and *y*-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
See also
--------
astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng
Definition of the latitude and longitude axes
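Examples
--------
A minimal call sketch (``wcsprm`` is assumed to be an initialized
two-axis `~astropy.wcs.Wcsprm` and ``np`` is ``numpy``; the pixel
values are illustrative only)::
    result = wcsprm.p2s(np.array([[100.0, 200.0]]), 1)
    world = result['world']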
"""
p4_pix2foc = f"""
p4_pix2foc(*pixcrd, origin*) -> ``double array[ncoord][nelem]``
Convert pixel coordinates to focal plane coordinates using `distortion
paper`_ lookup-table correction.
Parameters
----------
pixcrd : ndarray
Array of pixel coordinates as ``double array[ncoord][nelem]``.
{ORIGIN()}
Returns
-------
foccrd : ndarray
Returns an array of focal plane coordinates as ``double array[ncoord][nelem]``.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
pc = """
``double array[naxis][naxis]`` The ``PCi_ja`` (pixel coordinate)
transformation matrix.
The order is::
[[PC1_1, PC1_2],
[PC2_1, PC2_2]]
For historical compatibility, three alternate specifications of the
linear transformations are available in wcslib. The canonical
``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated
``CROTAia`` keywords. Although the latter may not formally co-exist
with ``PCi_ja``, the approach here is simply to ignore them if given
in conjunction with ``PCi_ja``.
`~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and
`~astropy.wcs.Wcsprm.has_crota` can be used to determine which of
these alternatives are present in the header.
These alternate specifications of the linear transformation matrix are
translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and
are nowhere visible to the lower-level routines. In particular,
`~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity
if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is
associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts
to a unity ``PCi_ja`` matrix.
"""
phi0 = """
``double`` The native longitude of the fiducial point.
The point whose celestial coordinates are given in ``ref[1:2]``. If
undefined (NaN) the initialization routine, `~astropy.wcs.Wcsprm.set`,
will set this to a projection-specific default.
See also
--------
astropy.wcs.Wcsprm.theta0
"""
pix2foc = f"""
pix2foc(*pixcrd, origin*) -> ``double array[ncoord][nelem]``
Perform both `SIP`_ polynomial and `distortion paper`_ lookup-table
correction in parallel.
Parameters
----------
pixcrd : ndarray
Array of pixel coordinates as ``double array[ncoord][nelem]``.
{ORIGIN()}
Returns
-------
foccrd : ndarray
Returns an array of focal plane coordinates as ``double array[ncoord][nelem]``.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
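Examples
--------
A short call sketch (``wcs`` is assumed to be an `~astropy.wcs.WCS`
with `SIP`_ and/or `distortion paper`_ corrections loaded, and ``np``
is ``numpy``; the pixel values are illustrative only)::
    foccrd = wcs.pix2foc(np.array([[100.0, 200.0]]), 1)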
"""
piximg_matrix = """
``double array[2][2]`` (read-only) Matrix containing the product of
the ``CDELTia`` diagonal matrix and the ``PCi_ja`` matrix.
"""
print_contents = """
print_contents()
Print the contents of the `~astropy.wcs.Wcsprm` object to stdout.
Probably only useful for debugging purposes, and may be removed in the
future.
To get a string of the contents, use `repr`.
"""
print_contents_tabprm = """
print_contents()
Print the contents of the `~astropy.wcs.Tabprm` object to
stdout. Probably only useful for debugging purposes, and may be
removed in the future.
To get a string of the contents, use `repr`.
"""
print_contents_wtbarr = """
print_contents()
Print the contents of the `~astropy.wcs.Wtbarr` object to
stdout. Probably only useful for debugging purposes, and may be
removed in the future.
To get a string of the contents, use `repr`.
"""
radesys = """
``string`` The equatorial or ecliptic coordinate system type,
``RADESYSa``.
"""
restfrq = """
``double`` Rest frequency (Hz) from ``RESTFRQa``.
An undefined value is represented by NaN.
"""
restwav = """
``double`` Rest wavelength (m) from ``RESTWAVa``.
An undefined value is represented by NaN.
"""
row = """
``int`` (read-only) Table row number.
"""
rsun_ref = """
``double`` Reference radius of the Sun used in coordinate calculations (m).
If undefined, this is set to `None`.
"""
s2p = f"""
s2p(world, origin)
Transforms world coordinates to pixel coordinates.
Parameters
----------
world : ndarray
Array of world coordinates, in decimal degrees, as ``double array[ncoord][nelem]``.
{ORIGIN()}
Returns
-------
result : dict
Returns a dictionary with the following keys:
- *phi*: ``double array[ncoord]``
- *theta*: ``double array[ncoord]``
- Longitude and latitude in the native coordinate system of
the projection, in degrees.
- *imgcrd*: ``double array[ncoord][nelem]``
- Array of intermediate world coordinates. For celestial axes,
``imgcrd[][self.lng]`` and ``imgcrd[][self.lat]`` are the
projected *x*-, and *y*-coordinates, in pseudo \"degrees\".
For quadcube projections with a ``CUBEFACE`` axis, the face
number is also returned in ``imgcrd[][self.cubeface]``. For
spectral axes, ``imgcrd[][self.spec]`` is the intermediate
spectral coordinate, in SI units.
- *pixcrd*: ``double array[ncoord][nelem]``
- Array of pixel coordinates. Pixel coordinates are
zero-based.
- *stat*: ``int array[ncoord]``
- Status return value for each coordinate. ``0`` for success,
``1+`` for invalid pixel coordinate.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
See also
--------
astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng
Definition of the latitude and longitude axes
"""
sense = """
``int array[M]`` +1 if monotonically increasing, -1 if decreasing.
A vector of length `~astropy.wcs.Tabprm.M` whose elements
indicate whether the corresponding indexing vector is monotonically
increasing (+1), or decreasing (-1).
"""
set = """
set()
Sets up a WCS object for use according to information supplied within
it.
Note that this routine need not be called directly; it will be invoked
by `~astropy.wcs.Wcsprm.p2s` and `~astropy.wcs.Wcsprm.s2p` if
necessary.
Some attributes that are based on other attributes (such as
`~astropy.wcs.Wcsprm.lattyp` on `~astropy.wcs.Wcsprm.ctype`) may not
be correct until after `~astropy.wcs.Wcsprm.set` is called.
`~astropy.wcs.Wcsprm.set` strips off trailing blanks in all string
members.
`~astropy.wcs.Wcsprm.set` recognizes the ``NCP`` projection and
converts it to the equivalent ``SIN`` projection and it also
recognizes ``GLS`` as a synonym for ``SFL``. It does alias
translation for the AIPS spectral types (``FREQ-LSR``, ``FELO-HEL``,
etc.) but without changing the input header keywords.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
set_tabprm = """
set()
Allocates memory for work arrays.
Also sets up the class according to information supplied within it.
Note that this routine need not be called directly; it will be invoked
by functions that need it.
Raises
------
MemoryError
Memory allocation failed.
InvalidTabularParametersError
Invalid tabular parameters.
"""
set_celprm = """
set()
Sets up a ``celprm`` struct according to information supplied within it.
Note that this routine need not be called directly; it will be invoked
by functions that need it.
Raises
------
MemoryError
Memory allocation failed.
InvalidPrjParametersError
Invalid celestial parameters.
"""
set_ps = """
set_ps(ps)
Sets ``PSi_ma`` keywords for each *i* and *m*.
Parameters
----------
ps : sequence of tuple
The input must be a sequence of tuples of the form (*i*, *m*,
*value*):
- *i*: int. Axis number, as in ``PSi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PSi_ma``, (i.e. 0-relative)
- *value*: string. Parameter value.
See also
--------
astropy.wcs.Wcsprm.get_ps
"""
set_pv = """
set_pv(pv)
Sets ``PVi_ma`` keywords for each *i* and *m*.
Parameters
----------
pv : list of tuple
The input must be a sequence of tuples of the form (*i*, *m*,
*value*):
- *i*: int. Axis number, as in ``PVi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PVi_ma``, (i.e. 0-relative)
- *value*: float. Parameter value.
See also
--------
astropy.wcs.Wcsprm.get_pv
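Examples
--------
A brief sketch of the expected input form (the axis numbers, parameter
numbers, and values below are illustrative only)::
    wcsprm.set_pv([(2, 1, 45.0), (2, 2, 30.0)])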
"""
sip = """
Get/set the `~astropy.wcs.Sip` object for performing `SIP`_ distortion
correction.
"""
Sip = """
Sip(*a, b, ap, bp, crpix*)
The `~astropy.wcs.Sip` class performs polynomial distortion correction
using the `SIP`_ convention in both directions.
Parameters
----------
a : ndarray
The ``A_i_j`` polynomial for pixel to focal plane transformation as ``double array[m+1][m+1]``.
Its size must be (*m* + 1, *m* + 1) where *m* = ``A_ORDER``.
b : ndarray
The ``B_i_j`` polynomial for pixel to focal plane transformation as ``double array[m+1][m+1]``.
Its size must be (*m* + 1, *m* + 1) where *m* = ``B_ORDER``.
ap : ndarray
The ``AP_i_j`` polynomial for focal plane to pixel transformation as ``double array[m+1][m+1]``.
Its size must be (*m* + 1, *m* + 1) where *m* = ``AP_ORDER``.
bp : ndarray
The ``BP_i_j`` polynomial for focal plane to pixel transformation as ``double array[m+1][m+1]``.
Its size must be (*m* + 1, *m* + 1) where *m* = ``BP_ORDER``.
crpix : ndarray
The reference pixel as ``double array[2]``.
Notes
-----
Shupe, D. L., M. Moshir, J. Li, D. Makovoz and R. Narron. 2005.
"The SIP Convention for Representing Distortion in FITS Image
Headers." ADASS XIV.
"""
sip_foc2pix = f"""
sip_foc2pix(*foccrd, origin*) -> ``double array[ncoord][nelem]``
Convert focal plane coordinates to pixel coordinates using the `SIP`_
polynomial distortion convention.
Parameters
----------
foccrd : ndarray
Array of focal plane coordinates as ``double array[ncoord][nelem]``.
{ORIGIN()}
Returns
-------
pixcrd : ndarray
Returns an array of pixel coordinates as ``double array[ncoord][nelem]``.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
sip_pix2foc = f"""
sip_pix2foc(*pixcrd, origin*) -> ``double array[ncoord][nelem]``
Convert pixel coordinates to focal plane coordinates using the `SIP`_
polynomial distortion convention.
Parameters
----------
pixcrd : ndarray
Array of pixel coordinates as ``double array[ncoord][nelem]``.
{ORIGIN()}
Returns
-------
foccrd : ndarray
Returns an array of focal plane coordinates as ``double array[ncoord][nelem]``.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
spcfix = """
spcfix() -> int
Translates AIPS-convention spectral coordinate types. {``FREQ``,
``VELO``, ``FELO``}-{``OBS``, ``HEL``, ``LSR``} (e.g. ``FREQ-LSR``,
``VELO-OBS``, ``FELO-HEL``)
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
spec = """
``int`` (read-only) The index containing the spectral axis values.
"""
specsys = """
``string`` Spectral reference frame (standard of rest), ``SPECSYSa``.
See also
--------
astropy.wcs.Wcsprm.ssysobs, astropy.wcs.Wcsprm.velosys
"""
sptr = """
sptr(ctype, i=-1)
Translates the spectral axis in a WCS object.
For example, a ``FREQ`` axis may be translated into ``ZOPT-F2W`` and
vice versa.
Parameters
----------
ctype : str
Required spectral ``CTYPEia``, maximum of 8 characters. The first
four characters are required to be given and are never modified.
The remaining four, the algorithm code, are completely determined
by, and must be consistent with, the first four characters.
Wildcarding may be used, i.e. if the final three characters are
specified as ``\"???\"``, or if just the eighth character is
specified as ``\"?\"``, the correct algorithm code will be
substituted and returned.
i : int
Index of the spectral axis (0-relative). If ``i < 0`` (or not
provided), it will be set to the first spectral axis identified
from the ``CTYPE`` keyvalues in the FITS header.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
InvalidSubimageSpecificationError
Invalid subimage specification (no spectral axis).
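Examples
--------
A short usage sketch (``wcsprm`` is assumed to contain a frequency
axis; the wildcarded algorithm code is filled in automatically)::
    wcsprm.sptr('ZOPT-???')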
"""
ssysobs = """
``string`` Spectral reference frame.
The spectral reference frame in which there is no differential
variation in the spectral coordinate across the field-of-view,
``SSYSOBSa``.
See also
--------
astropy.wcs.Wcsprm.specsys, astropy.wcs.Wcsprm.velosys
"""
ssyssrc = """
``string`` Spectral reference frame for redshift.
The spectral reference frame (standard of rest) in which the redshift
was measured, ``SSYSSRCa``.
"""
sub = """
sub(axes)
Extracts the coordinate description for a subimage from a
`~astropy.wcs.WCS` object.
The world coordinate system of the subimage must be separable in the
sense that the world coordinates at any point in the subimage must
depend only on the pixel coordinates of the axes extracted. In
practice, this means that the ``PCi_ja`` matrix of the original image
must not contain non-zero off-diagonal terms that associate any of the
subimage axes with any of the non-subimage axes.
`sub` can also add axes to a wcsprm object. The new axes will be
created using the defaults set by the Wcsprm constructor which produce
a simple, unnamed, linear axis with world coordinates equal to the
pixel coordinate. These default values can be changed before
invoking `set`.
Parameters
----------
axes : int or a sequence.
- If an int, include the first *N* axes in their original order.
- If a sequence, may contain a combination of image axis numbers
(1-relative) or special axis identifiers (see below). Order is
significant; ``axes[0]`` is the axis number of the input image
that corresponds to the first axis in the subimage, etc. Use an
axis number of 0 to create a new axis using the defaults.
- If ``0``, ``[]`` or ``None``, do a deep copy.
Coordinate axes types may be specified using either strings or
special integer constants. The available types are:
- ``'longitude'`` / ``WCSSUB_LONGITUDE``: Celestial longitude
- ``'latitude'`` / ``WCSSUB_LATITUDE``: Celestial latitude
- ``'cubeface'`` / ``WCSSUB_CUBEFACE``: Quadcube ``CUBEFACE`` axis
- ``'spectral'`` / ``WCSSUB_SPECTRAL``: Spectral axis
- ``'stokes'`` / ``WCSSUB_STOKES``: Stokes axis
- ``'temporal'`` / ``WCSSUB_TIME``: Time axis (requires ``WCSLIB`` version 7.8 or greater)
- ``'celestial'`` / ``WCSSUB_CELESTIAL``: An alias for the
combination of ``'longitude'``, ``'latitude'`` and ``'cubeface'``.
Returns
-------
new_wcs : `~astropy.wcs.WCS` object
Raises
------
MemoryError
Memory allocation failed.
InvalidSubimageSpecificationError
Invalid subimage specification (no spectral axis).
NonseparableSubimageCoordinateSystemError
Non-separable subimage coordinate system.
Notes
-----
Combinations of subimage axes of particular types may be extracted in
the same order as they occur in the input image by combining the
integer constants with the 'binary or' (``|``) operator. For
example::
wcs.sub([WCSSUB_LONGITUDE | WCSSUB_LATITUDE | WCSSUB_SPECTRAL])
would extract the longitude, latitude, and spectral axes in the same
order as the input image. If one of each were present, the resulting
object would have three dimensions.
For convenience, ``WCSSUB_CELESTIAL`` is defined as the combination
``WCSSUB_LONGITUDE | WCSSUB_LATITUDE | WCSSUB_CUBEFACE``.
The codes may also be negated to extract all but the types specified,
for example::
wcs.sub([
WCSSUB_LONGITUDE,
WCSSUB_LATITUDE,
WCSSUB_CUBEFACE,
-(WCSSUB_SPECTRAL | WCSSUB_STOKES)])
The last of these specifies all axis types other than spectral or
Stokes. Extraction is done in the order specified by ``axes``, i.e. a
longitude axis (if present) would be extracted first (via ``axes[0]``)
and not subsequently (via ``axes[3]``). Likewise for the latitude and
cubeface axes in this example.
The number of dimensions in the returned object may be less than or
greater than the length of ``axes``. However, it will never exceed the
number of axes in the input image.
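Examples
--------
A minimal usage sketch (``w`` is assumed to be a three-axis
`~astropy.wcs.WCS` with two celestial axes and one spectral axis)::
    celestial = w.sub(['celestial'])
    spectral = w.sub(['spectral'])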
"""
tab = """
``list of Tabprm`` Tabular coordinate objects.
A list of tabular coordinate objects associated with this WCS.
"""
Tabprm = """
A class to store the information related to tabular coordinates,
i.e., coordinates that are defined via a lookup table.
This class can not be constructed directly from Python, but instead is
returned from `~astropy.wcs.Wcsprm.tab`.
"""
theta0 = """
``double`` The native latitude of the fiducial point.
The point whose celestial coordinates are given in ``ref[1:2]``. If
undefined (NaN) the initialization routine, `~astropy.wcs.Wcsprm.set`,
will set this to a projection-specific default.
See also
--------
astropy.wcs.Wcsprm.phi0
"""
to_header = """
to_header(relax=False)
`to_header` translates a WCS object into a FITS header.
The details of the header depends on context:
- If the `~astropy.wcs.Wcsprm.colnum` member is non-zero then a
binary table image array header will be produced.
- Otherwise, if the `~astropy.wcs.Wcsprm.colax` member is set
non-zero then a pixel list header will be produced.
- Otherwise, a primary image or image extension header will be
produced.
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required keywords
such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or ``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will be
translated to standard (this is partially dependent on whether
``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and only if
they differ from the unit matrix. Thus, if the matrix is unity
then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`~astropy.wcs.Wcsprm.to_header` tries hard to write meaningful
comments.
8. Keyword order may be changed.
Keywords can be translated between the image array, binary table, and
pixel lists forms by manipulating the `~astropy.wcs.Wcsprm.colnum` or
`~astropy.wcs.Wcsprm.colax` members of the `~astropy.wcs.WCS`
object.
Parameters
----------
relax : bool or int
Degree of permissiveness:
- `False`: Recognize only FITS keywords defined by the published
WCS standard.
- `True`: Admit all recognized informal extensions of the WCS
standard.
- `int`: a bit field selecting specific extensions to write.
See :ref:`astropy:relaxwrite` for details.
Returns
-------
header : str
Raw FITS header as a string.
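Examples
--------
A minimal usage sketch (``wcsprm`` is assumed to be an initialized
`~astropy.wcs.Wcsprm`; the exact keywords emitted depend on its
contents)::
    header_string = wcsprm.to_header(relax=True)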
"""
ttype = """
``str`` (read-only) ``TTYPEn`` identifying the column of the binary table that contains
the wcstab array.
"""
unitfix = """
unitfix(translate_units='')
Translates non-standard ``CUNITia`` keyvalues.
For example, ``DEG`` -> ``deg``, also stripping off unnecessary
whitespace.
Parameters
----------
translate_units : str, optional
Do potentially unsafe translations of non-standard unit strings.
Although ``\"S\"`` is commonly used to represent seconds, its
recognizes ``\"S\"`` formally as Siemens, however rarely that may
be translation to ``\"s\"`` is potentially unsafe since the
standard used. The same applies to ``\"H\"`` for hours (Henry),
and ``\"D\"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``\"s\"``, translate ``\"S\"`` to ``\"s\"``.
- If the string contains ``\"h\"``, translate ``\"H\"`` to ``\"h\"``.
- If the string contains ``\"d\"``, translate ``\"D\"`` to ``\"d\"``.
Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'``
does all of them.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
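Examples
--------
A short usage sketch (``wcsprm`` is assumed to contain non-standard
``CUNITia`` values such as ``DEG``)::
    status = wcsprm.unitfix(translate_units='shd')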
"""
velangl = """
``double`` Velocity angle.
The angle in degrees that should be used to decompose an observed
velocity into radial and transverse components.
An undefined value is represented by NaN.
"""
velosys = """
``double`` Relative radial velocity.
The relative radial velocity (m/s) between the observer and the
selected standard of rest in the direction of the celestial reference
coordinate, ``VELOSYSa``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.specsys, astropy.wcs.Wcsprm.ssysobs
"""
velref = """
``int`` AIPS velocity code.
From ``VELREF`` keyword.
"""
wcs = """
A `~astropy.wcs.Wcsprm` object to perform the basic `wcslib`_ WCS
transformation.
"""
Wcs = """
Wcs(*sip, cpdis, wcsprm, det2im*)
Wcs objects amalgamate basic WCS (as provided by `wcslib`_), with
`SIP`_ and `distortion paper`_ operations.
To perform all distortion corrections and WCS transformation, use
``all_pix2world``.
Parameters
----------
sip : `~astropy.wcs.Sip` object or None
cpdis : (2,) tuple of `~astropy.wcs.DistortionLookupTable` or None
wcsprm : `~astropy.wcs.Wcsprm`
det2im : (2,) tuple of `~astropy.wcs.DistortionLookupTable` or None
"""
Wcsprm = """
Wcsprm(header=None, key=' ', relax=False, naxis=2, keysel=0, colsel=None)
`~astropy.wcs.Wcsprm` performs the core WCS transformations.
.. note::
The members of this object correspond roughly to the key/value
pairs in the FITS header. However, they are adjusted and
normalized in a number of ways that make performing the WCS
transformation easier. Therefore, they can not be relied upon to
get the original values in the header. For that, use
`astropy.io.fits.Header` directly.
The FITS header parsing enforces correct FITS "keyword = value" syntax
with regard to the equals sign occurring in columns 9 and 10.
However, it does recognize free-format character (NOST 100-2.0,
Sect. 5.2.1), integer (Sect. 5.2.3), and floating-point values
(Sect. 5.2.4) for all keywords.
.. warning::
Many of the attributes of this class require additional processing when
modifying the underlying C structure. When needed, this additional processing
is implemented in attribute setters. Therefore, for mutable attributes, one
should always set the attribute rather than a slice of its current value (or
its individual elements) since the latter may lead the class instance to be
in an invalid state. For example, attribute ``crpix`` of a 2D WCS'
``Wcsprm`` object ``wcs`` should be set as ``wcs.crpix = [crpix1, crpix2]``
instead of ``wcs.crpix[0] = crpix1; wcs.crpix[1] = crpix2``.
Parameters
----------
header : `~astropy.io.fits.Header`, str, or None.
If ``None``, the object will be initialized to default values.
key : str, optional
The key referring to a particular WCS transform in the header.
This may be either ``' '`` or ``'A'``-``'Z'`` and corresponds to
the ``\"a\"`` part of ``\"CTYPEia\"``. (*key* may only be
provided if *header* is also provided.)
relax : bool or int, optional
Degree of permissiveness:
- `False`: Recognize only FITS keywords defined by the published
WCS standard.
- `True`: Admit all recognized informal extensions of the WCS
standard.
- `int`: a bit field selecting specific extensions to accept. See
:ref:`astropy:relaxread` for details.
naxis : int, optional
The number of world coordinates axes for the object. (*naxis* may
only be provided if *header* is `None`.)
keysel : sequence of flag bits, optional
Vector of flag bits that may be used to restrict the keyword types
considered:
- ``WCSHDR_IMGHEAD``: Image header keywords.
- ``WCSHDR_BIMGARR``: Binary table image array.
- ``WCSHDR_PIXLIST``: Pixel list keywords.
If zero, there is no restriction. If -1, the underlying wcslib
function ``wcspih()`` is called, rather than ``wcstbh()``.
colsel : sequence of int
A sequence of table column numbers used to restrict the keywords
considered. `None` indicates no restriction.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
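Examples
--------
A minimal construction sketch (no header; two linear axes initialized
to defaults, with whole attributes assigned as recommended in the
warning above; all values are illustrative only)::
    wcsprm = Wcsprm(naxis=2)
    wcsprm.crpix = [512.0, 512.0]
    wcsprm.cdelt = [-0.001, 0.001]
    wcsprm.ctype = ['RA---TAN', 'DEC--TAN']
    wcsprm.set()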
"""
wtb = """
``list of Wtbarr`` objects to construct coordinate lookup tables from BINTABLE.
"""
Wtbarr = """
Classes to construct coordinate lookup tables from a binary table
extension (BINTABLE).
This class can not be constructed directly from Python, but instead is
returned from `~astropy.wcs.Wcsprm.wtb`.
"""
zsource = """
``double`` The redshift, ``ZSOURCEa``, of the source.
An undefined value is represented by NaN.
"""
WcsError = """
Base class of all invalid WCS errors.
"""
SingularMatrix = """
SingularMatrixError()
The linear transformation matrix is singular.
"""
InconsistentAxisTypes = """
InconsistentAxisTypesError()
The WCS header contains inconsistent or unrecognized coordinate axis type(s).
"""
InvalidTransform = """
InvalidTransformError()
The WCS transformation is invalid, or the transformation parameters
are invalid.
"""
InvalidCoordinate = """
InvalidCoordinateError()
One or more of the world coordinates is invalid.
"""
NoSolution = """
NoSolutionError()
No solution can be found in the given interval.
"""
InvalidSubimageSpecification = """
InvalidSubimageSpecificationError()
The subimage specification is invalid.
"""
NonseparableSubimageCoordinateSystem = """
NonseparableSubimageCoordinateSystemError()
Non-separable subimage coordinate system.
"""
NoWcsKeywordsFound = """
NoWcsKeywordsFoundError()
No WCS keywords were found in the given header.
"""
InvalidTabularParameters = """
InvalidTabularParametersError()
The given tabular parameters are invalid.
"""
InvalidPrjParameters = """
InvalidPrjParametersError()
The given projection parameters are invalid.
"""
mjdbeg = """
``double`` Modified Julian Date corresponding to ``DATE-BEG``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.mjdbeg
"""
mjdend = """
``double`` Modified Julian Date corresponding to ``DATE-END``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.mjdend
"""
mjdref = """
``double`` Modified Julian Date corresponding to ``DATE-REF``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.dateref
"""
bepoch = """
``double`` Equivalent to ``DATE-OBS``.
Expressed as a Besselian epoch.
See also
--------
astropy.wcs.Wcsprm.dateobs
"""
jepoch = """
``double`` Equivalent to ``DATE-OBS``.
Expressed as a Julian epoch.
See also
--------
astropy.wcs.Wcsprm.dateobs
"""
datebeg = """
``string`` Date at the start of the observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.datebeg
"""
dateend = """
``string`` Date at the end of the observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.dateend
"""
dateref = """
``string`` Date of a reference epoch relative to which
other time measurements refer.
See also
--------
astropy.wcs.Wcsprm.dateref
"""
timesys = """
``string`` Time scale (UTC, TAI, etc.) in which all other time-related
auxiliary header values are recorded. Also defines the time scale for
an image axis with CTYPEia set to 'TIME'.
See also
--------
astropy.wcs.Wcsprm.timesys
"""
trefpos = """
``string`` Location in space where the recorded time is valid.
See also
--------
astropy.wcs.Wcsprm.trefpos
"""
trefdir = """
``string`` Reference direction used in calculating a pathlength delay.
See also
--------
astropy.wcs.Wcsprm.trefdir
"""
timeunit = """
``string`` Time units in which the following header values are expressed:
``TSTART``, ``TSTOP``, ``TIMEOFFS``, ``TIMSYER``, ``TIMRDER``, ``TIMEDEL``.
It also provides the default value for ``CUNITia`` for time axes.
See also
--------
astropy.wcs.Wcsprm.trefdir
"""
plephem = """
``string`` The Solar System ephemeris used for calculating a pathlength delay.
See also
--------
astropy.wcs.Wcsprm.plephem
"""
tstart = """
``double`` Equivalent to ``DATE-BEG`` expressed as a time in units of ``TIMEUNIT`` relative to ``DATEREF`` + ``TIMEOFFS``.
See also
--------
astropy.wcs.Wcsprm.tstop
"""
tstop = """
``double`` Equivalent to ``DATE-END`` expressed as a time in units of ``TIMEUNIT`` relative to ``DATEREF`` + ``TIMEOFFS``.
See also
--------
astropy.wcs.Wcsprm.tstart
"""
telapse = """
``double`` Equivalent to the elapsed time between ``DATE-BEG`` and ``DATE-END``, in units of ``TIMEUNIT``.
See also
--------
astropy.wcs.Wcsprm.tstart
"""
timeoffs = """
``double`` Time offset, which may be used, for example, to provide a uniform clock correction
for times referenced to DATEREF.
See also
--------
astropy.wcs.Wcsprm.timeoffs
"""
timsyer = """
``double`` The absolute error of the time values, in units of ``TIMEUNIT``.
See also
--------
astropy.wcs.Wcsprm.timrder
"""
timrder = """
``double`` The accuracy of time stamps relative to each other, in units of ``TIMEUNIT``.
See also
--------
astropy.wcs.Wcsprm.timsyer
"""
timedel = """
``double`` The resolution of the time stamps.
See also
--------
astropy.wcs.Wcsprm.timedel
"""
timepixr = """
``double`` Relative position of the time stamps in binned time intervals, a value between 0.0 and 1.0.
See also
--------
astropy.wcs.Wcsprm.timepixr
"""
obsorbit = """
``string`` URI, URL, or name of an orbit ephemeris file giving spacecraft coordinates relating to TREFPOS.
See also
--------
astropy.wcs.Wcsprm.trefpos
"""
xposure = """
``double`` Effective exposure time, in units of ``TIMEUNIT``.
See also
--------
astropy.wcs.Wcsprm.timeunit
"""
czphs = """
``double array[naxis]`` The time at the zero point of a phase axis, ``CZPHSia``.
An undefined value is represented by NaN.
"""
cperi = """
``double array[naxis]`` Period of a phase axis, ``CPERIia``.
An undefined value is represented by NaN.
"""
|
bcc02f2f3044a4633cd7b262acd698055737ec15e5ef808123c0bfc8f41bfd4d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines structured units and quantities.
"""
from __future__ import annotations # For python < 3.10
# Standard library
import operator
import numpy as np
from .core import UNITY, Unit, UnitBase
__all__ = ["StructuredUnit"]
DTYPE_OBJECT = np.dtype("O")
def _names_from_dtype(dtype):
"""Recursively extract field names from a dtype."""
names = []
for name in dtype.names:
subdtype = dtype.fields[name][0]
if subdtype.names:
names.append([name, _names_from_dtype(subdtype)])
else:
names.append(name)
return tuple(names)
def _normalize_names(names):
"""Recursively normalize, inferring upper level names for unadorned tuples.
Generally, we want the field names to be organized like dtypes, as in
``(['pv', ('p', 'v')], 't')``. But we automatically infer upper
field names if the list is absent from items like ``(('p', 'v'), 't')``,
by concatenating the names inside the tuple.
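For example, a bare tuple of names gets an inferred upper-level name
(a small illustration of the rule above; output shown as a comment)::
    _normalize_names((('p', 'v'), 't'))
    # -> (['pv', ('p', 'v')], 't')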
"""
result = []
for name in names:
if isinstance(name, str) and len(name) > 0:
result.append(name)
elif (
isinstance(name, list)
and len(name) == 2
and isinstance(name[0], str)
and len(name[0]) > 0
and isinstance(name[1], tuple)
and len(name[1]) > 0
):
result.append([name[0], _normalize_names(name[1])])
elif isinstance(name, tuple) and len(name) > 0:
new_tuple = _normalize_names(name)
name = "".join([(i[0] if isinstance(i, list) else i) for i in new_tuple])
result.append([name, new_tuple])
else:
raise ValueError(
f"invalid entry {name!r}. Should be a name, "
"tuple of names, or 2-element list of the "
"form [name, tuple of names]."
)
return tuple(result)
class StructuredUnit:
"""Container for units for a structured Quantity.
Parameters
----------
units : unit-like, tuple of unit-like, or `~astropy.units.StructuredUnit`
Tuples can be nested. If a `~astropy.units.StructuredUnit` is passed
in, it will be returned unchanged unless different names are requested.
names : tuple of str, tuple or list; `~numpy.dtype`; or `~astropy.units.StructuredUnit`, optional
Field names for the units, possibly nested. Can be inferred from a
structured `~numpy.dtype` or another `~astropy.units.StructuredUnit`.
For nested tuples, by default the name of the upper entry will be the
concatenation of the names of the lower levels. One can pass in a
list with the upper-level name and a tuple of lower-level names to
avoid this. For tuples, not all levels have to be given; for any level
not passed in, default field names of 'f0', 'f1', etc., will be used.
Notes
-----
It is recommended to initialize the class indirectly, using
`~astropy.units.Unit`. E.g., ``u.Unit('AU,AU/day')``.
When combined with a structured array to produce a structured
`~astropy.units.Quantity`, array field names will take precedence.
Generally, passing in ``names`` is needed only if the unit is used
unattached to a `~astropy.units.Quantity` and one needs to access its
fields.
Examples
--------
Various ways to initialize a `~astropy.units.StructuredUnit`::
>>> import astropy.units as u
>>> su = u.Unit('(AU,AU/day),yr')
>>> su
Unit("((AU, AU / d), yr)")
>>> su.field_names
(['f0', ('f0', 'f1')], 'f1')
>>> su['f1']
Unit("yr")
>>> su2 = u.StructuredUnit(((u.AU, u.AU/u.day), u.yr), names=(('p', 'v'), 't'))
>>> su2 == su
True
>>> su2.field_names
(['pv', ('p', 'v')], 't')
>>> su3 = u.StructuredUnit((su2['pv'], u.day), names=(['p_v', ('p', 'v')], 't'))
>>> su3.field_names
(['p_v', ('p', 'v')], 't')
>>> su3.keys()
('p_v', 't')
>>> su3.values()
(Unit("(AU, AU / d)"), Unit("d"))
Structured units share most methods with regular units::
>>> su.physical_type
((PhysicalType('length'), PhysicalType({'speed', 'velocity'})), PhysicalType('time'))
>>> su.si
Unit("((1.49598e+11 m, 1.73146e+06 m / s), 3.15576e+07 s)")
"""
def __new__(cls, units, names=None):
dtype = None
if names is not None:
if isinstance(names, StructuredUnit):
dtype = names._units.dtype
names = names.field_names
elif isinstance(names, np.dtype):
if not names.fields:
raise ValueError("dtype should be structured, with fields.")
dtype = np.dtype([(name, DTYPE_OBJECT) for name in names.names])
names = _names_from_dtype(names)
else:
if not isinstance(names, tuple):
names = (names,)
names = _normalize_names(names)
if not isinstance(units, tuple):
units = Unit(units)
if isinstance(units, StructuredUnit):
# Avoid constructing a new StructuredUnit if no field names
# are given, or if all field names are the same already anyway.
if names is None or units.field_names == names:
return units
# Otherwise, turn (the upper level) into a tuple, for renaming.
units = units.values()
else:
# Single regular unit: make a tuple for iteration below.
units = (units,)
if names is None:
names = tuple(f"f{i}" for i in range(len(units)))
elif len(units) != len(names):
raise ValueError("lengths of units and field names must match.")
converted = []
for unit, name in zip(units, names):
if isinstance(name, list):
# For list, the first item is the name of our level,
# and the second another tuple of names, i.e., we recurse.
unit = cls(unit, name[1])
name = name[0]
else:
# We are at the lowest level. Check unit.
unit = Unit(unit)
if dtype is not None and isinstance(unit, StructuredUnit):
raise ValueError(
"units do not match in depth with field "
"names from dtype or structured unit."
)
converted.append(unit)
self = super().__new__(cls)
if dtype is None:
dtype = np.dtype(
[
((name[0] if isinstance(name, list) else name), DTYPE_OBJECT)
for name in names
]
)
# Decay array to void so we can access by field name and number.
self._units = np.array(tuple(converted), dtype)[()]
return self
def __getnewargs__(self):
"""When de-serializing, e.g. pickle, start with a blank structure."""
return (), None
@property
def field_names(self):
"""Possibly nested tuple of the field names of the parts."""
return tuple(
([name, unit.field_names] if isinstance(unit, StructuredUnit) else name)
for name, unit in self.items()
)
# Allow StructuredUnit to be treated as an (ordered) mapping.
def __len__(self):
return len(self._units.dtype.names)
def __getitem__(self, item):
# Since we are based on np.void, indexing by field number works too.
return self._units[item]
def values(self):
return self._units.item()
def keys(self):
return self._units.dtype.names
def items(self):
return tuple(zip(self._units.dtype.names, self._units.item()))
def __iter__(self):
yield from self._units.dtype.names
# Helpers for methods below.
def _recursively_apply(self, func, cls=None):
"""Apply func recursively.
Parameters
----------
func : callable
Function to apply to all parts of the structured unit,
recursing as needed.
cls : type, optional
If given, should be a subclass of `~numpy.void`. By default,
will return a new `~astropy.units.StructuredUnit` instance.
"""
applied = tuple(func(part) for part in self.values())
# Once not NUMPY_LT_1_23: results = np.void(applied, self._units.dtype).
results = np.array(applied, self._units.dtype)[()]
if cls is not None:
return results.view((cls, results.dtype))
# Short-cut; no need to interpret field names, etc.
result = super().__new__(self.__class__)
result._units = results
return result
def _recursively_get_dtype(self, value, enter_lists=True):
"""Get structured dtype according to value, using our field names.
This is useful since ``np.array(value)`` would treat tuples as lower
levels of the array, rather than as elements of a structured array.
The routine does presume that the type of the first tuple is
representative of the rest. Used in ``_get_converter``.
For the special value of ``UNITY``, all fields are assumed to be 1.0,
and hence this will return an all-float dtype.
"""
if enter_lists:
while isinstance(value, list):
value = value[0]
if value is UNITY:
value = (UNITY,) * len(self)
elif not isinstance(value, tuple) or len(self) != len(value):
raise ValueError(f"cannot interpret value {value} for unit {self}.")
descr = []
for (name, unit), part in zip(self.items(), value):
if isinstance(unit, StructuredUnit):
descr.append(
(name, unit._recursively_get_dtype(part, enter_lists=False))
)
else:
# Got a part associated with a regular unit. Gets its dtype.
# Like for Quantity, we cast integers to float.
part = np.array(part)
part_dtype = part.dtype
if part_dtype.kind in "iu":
part_dtype = np.dtype(float)
descr.append((name, part_dtype, part.shape))
return np.dtype(descr)
@property
def si(self):
"""The `StructuredUnit` instance in SI units."""
return self._recursively_apply(operator.attrgetter("si"))
@property
def cgs(self):
"""The `StructuredUnit` instance in cgs units."""
return self._recursively_apply(operator.attrgetter("cgs"))
# Needed to pass through Unit initializer, so might as well use it.
def _get_physical_type_id(self):
return self._recursively_apply(
operator.methodcaller("_get_physical_type_id"), cls=Structure
)
@property
def physical_type(self):
"""Physical types of all the fields."""
return self._recursively_apply(
operator.attrgetter("physical_type"), cls=Structure
)
def decompose(self, bases=set()):
"""The `StructuredUnit` composed of only irreducible units.
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `UnitsError` if it is not possible
to do so.
Returns
-------
`~astropy.units.StructuredUnit`
With the unit for each field containing only irreducible units.
"""
return self._recursively_apply(operator.methodcaller("decompose", bases=bases))
def is_equivalent(self, other, equivalencies=[]):
"""`True` if all fields are equivalent to the other's fields.
Parameters
----------
other : `~astropy.units.StructuredUnit`
The structured unit to compare with, or what can initialize one.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
The list will be applied to all fields.
Returns
-------
bool
"""
try:
other = StructuredUnit(other)
except Exception:
return False
if len(self) != len(other):
return False
for self_part, other_part in zip(self.values(), other.values()):
if not self_part.is_equivalent(other_part, equivalencies=equivalencies):
return False
return True
def _get_converter(self, other, equivalencies=[]):
if not isinstance(other, type(self)):
other = self.__class__(other, names=self)
converters = [
self_part._get_converter(other_part, equivalencies=equivalencies)
for (self_part, other_part) in zip(self.values(), other.values())
]
def converter(value):
if not hasattr(value, "dtype"):
value = np.array(value, self._recursively_get_dtype(value))
result = np.empty_like(value)
for name, converter_ in zip(result.dtype.names, converters):
result[name] = converter_(value[name])
# Index with empty tuple to decay array scalars to numpy void.
return result if result.shape else result[()]
return converter
def to(self, other, value=np._NoValue, equivalencies=[]):
"""Return values converted to the specified unit.
Parameters
----------
other : `~astropy.units.StructuredUnit`
The unit to convert to. If necessary, will be converted to
a `~astropy.units.StructuredUnit` using the dtype of ``value``.
value : array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If a sequence, the first element must have
entries of the correct type to represent all elements (i.e.,
not have, e.g., a ``float`` where other elements have ``complex``).
If not given, assumed to have 1. in all fields.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s).
Raises
------
UnitsError
If units are inconsistent
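Examples
--------
A brief sketch (the exact numbers of the converted result depend on
the adopted constants)::
    import astropy.units as u
    pv = u.Unit('AU,AU/day')
    pv.to('km,km/s', (1.0, 1.0))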
"""
if value is np._NoValue:
# We do not have UNITY as a default, since then the docstring
# would list 1.0 as default, yet one could not pass that in.
value = UNITY
return self._get_converter(other, equivalencies=equivalencies)(value)
def to_string(self, format="generic"):
"""Output the unit in the given format as a string.
Units are separated by commas.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
Notes
-----
Structured units can be written to all formats, but can be
re-read only with 'generic'.
"""
parts = [part.to_string(format) for part in self.values()]
out_fmt = "({})" if len(self) > 1 else "({},)"
if format.startswith("latex"):
# Strip $ from parts and add them on the outside.
parts = [part[1:-1] for part in parts]
out_fmt = "$" + out_fmt + "$"
return out_fmt.format(", ".join(parts))
def _repr_latex_(self):
return self.to_string("latex")
__array_ufunc__ = None
def __mul__(self, other):
if isinstance(other, str):
try:
other = Unit(other, parse_strict="silent")
except Exception:
return NotImplemented
if isinstance(other, UnitBase):
new_units = tuple(part * other for part in self.values())
return self.__class__(new_units, names=self)
if isinstance(other, StructuredUnit):
return NotImplemented
# Anything not like a unit, try initialising as a structured quantity.
try:
from .quantity import Quantity
return Quantity(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, str):
try:
other = Unit(other, parse_strict="silent")
except Exception:
return NotImplemented
if isinstance(other, UnitBase):
new_units = tuple(part / other for part in self.values())
return self.__class__(new_units, names=self)
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __str__(self):
return self.to_string()
def __repr__(self):
return f'Unit("{self.to_string()}")'
def __eq__(self, other):
try:
other = StructuredUnit(other)
except Exception:
return NotImplemented
return self.values() == other.values()
def __ne__(self, other):
if not isinstance(other, type(self)):
try:
other = StructuredUnit(other)
except Exception:
return NotImplemented
return self.values() != other.values()
class Structure(np.void):
"""Single element structure for physical type IDs, etc.
Behaves like a `~numpy.void` and thus mostly like a tuple which can also
be indexed with field names, but overrides ``__eq__`` and ``__ne__`` to
compare only the contents, not the field names. Furthermore, this way no
`FutureWarning` about comparisons is given.
"""
# Note that it is important for physical type IDs to not be stored in a
# tuple, since then the physical types would be treated as alternatives in
# :meth:`~astropy.units.UnitBase.is_equivalent`. (Of course, in that
# case, they could also not be indexed by name.)
def __eq__(self, other):
if isinstance(other, np.void):
other = other.item()
return self.item() == other
def __ne__(self, other):
if isinstance(other, np.void):
other = other.item()
return self.item() != other
def _structured_unit_like_dtype(
unit: UnitBase | StructuredUnit, dtype: np.dtype
) -> StructuredUnit:
"""Make a `StructuredUnit` of one unit, with the structure of a `numpy.dtype`.
Parameters
----------
unit : UnitBase
The unit that will be filled into the structure.
dtype : `numpy.dtype`
The structure for the StructuredUnit.
Returns
-------
StructuredUnit
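For example (a small sketch; the field names are arbitrary)::
    import numpy as np
    import astropy.units as u
    _structured_unit_like_dtype(u.m, np.dtype([('x', 'f8'), ('y', 'f8')]))
    # -> Unit("(m, m)")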
"""
if isinstance(unit, StructuredUnit):
# If unit is structured, it should match the dtype. This function is
# only used in Quantity, which performs this check, so it's fine to
# return as is.
return unit
# Make a structured unit
units = []
for name in dtype.names:
subdtype = dtype.fields[name][0]
if subdtype.names is not None:
units.append(_structured_unit_like_dtype(unit, subdtype))
else:
units.append(unit)
return StructuredUnit(tuple(units), names=dtype.names)
|
11e933ffa47b8a496592b504e772ec76feeab45a4e36c796ed93851aa46cd60e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# STDLIB
import numbers
import operator
import re
import warnings
from fractions import Fraction
# THIRD PARTY
import numpy as np
# LOCAL
from astropy import config as _config
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.misc import isiterable
from .core import (
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
get_current_unit_registry,
)
from .format import Base, Latex
from .quantity_helper import can_have_arbitrary_unit, check_output, converters_and_unit
from .quantity_helper.function_helpers import (
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .structured import StructuredUnit, _structured_unit_like_dtype
from .utils import is_effectively_unity
__all__ = [
"Quantity",
"SpecificTypeQuantity",
"QuantityInfoBase",
"QuantityInfo",
"allclose",
"isclose",
]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ["Quantity.*"]
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity
"""
latex_array_threshold = _config.ConfigItem(
100,
"The maximum size an array Quantity can be before its LaTeX "
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
"negative number means that the value will instead be whatever numpy "
"gets from get_printoptions.",
)
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
def __len__(self):
return len(self._dataiter)
#### properties and methods to match `numpy.ndarray.flatiter` ####
@property
def base(self):
"""A reference to the array that is iterated over."""
return self._quantity
@property
def coords(self):
"""An N-dimensional tuple of current coordinates."""
return self._dataiter.coords
@property
def index(self):
"""Current flat index into the array."""
return self._dataiter.index
def copy(self):
"""Get a copy of the iterator as a 1-D array."""
return self._quantity.flatten()
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {"dtype", "unit"} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f"{val.value}"
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("value", "unit")
_construct_from_dict_args = ["value"]
_represent_as_dict_primary_data = "value"
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.units.Quantity` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop("shape")
dtype = attrs.pop("dtype")
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {
key: (data if key == "value" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
map["copy"] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : unit-like
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
If `None`, the normal `numpy.dtype` introspection is used, e.g.
preventing upcasting of integers.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be prepended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
Unless the ``dtype`` argument is explicitly specified, integer
or (non-Quantity) object inputs are converted to `float` by default.
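Examples
--------
A few illustrative (not exhaustive) ways of creating a |Quantity|:
>>> from astropy import units as u
>>> u.Quantity(15.0, u.m / u.s)
<Quantity 15. m / s>
>>> 15.0 * u.m / u.s
<Quantity 15. m / s>
>>> u.Quantity("15 m/s")
<Quantity 15. m / s>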
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __class_getitem__(cls, unit_shape_dtype):
"""Quantity Type Hints.
Unit-aware type hints are ``Annotated`` objects that encode the class,
the unit, and possibly shape and dtype information, depending on the
python and :mod:`numpy` versions.
Schematically, ``Annotated[cls[shape, dtype], unit]``
As a classmethod, the type is the class, i.e., ``Quantity``
produces an ``Annotated[Quantity, ...]`` while a subclass
like :class:`~astropy.coordinates.Angle` returns
``Annotated[Angle, ...]``.
Parameters
----------
unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple
Unit specification; this can also be a physical type (i.e., a str or class).
If tuple, then the first element is the unit specification
and all other elements are for `numpy.ndarray` type annotations.
Whether they are included depends on the python and :mod:`numpy`
versions.
Returns
-------
`typing.Annotated`, `typing_extensions.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType`
Return type in this preference order:
* if python v3.9+ : `typing.Annotated`
* if :mod:`typing_extensions` is installed : `typing_extensions.Annotated`
* `astropy.units.Unit` or `astropy.units.PhysicalType`
Raises
------
TypeError
If the unit/physical_type annotation is not Unit-like or
PhysicalType-like.
Examples
--------
Create a unit-aware Quantity type annotation
>>> Quantity[Unit("s")]
Annotated[Quantity, Unit("s")]
See Also
--------
`~astropy.units.quantity_input`
Use annotations for unit checks on function arguments and results.
Notes
-----
With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also
static-type compatible.
"""
# LOCAL
from ._typing import HAS_ANNOTATED, Annotated
# process whether [unit] or [unit, shape, ptype]
if isinstance(unit_shape_dtype, tuple): # unit, shape, dtype
target = unit_shape_dtype[0]
shape_dtype = unit_shape_dtype[1:]
else: # just unit
target = unit_shape_dtype
shape_dtype = ()
# Allowed unit/physical types. Errors if neither.
try:
unit = Unit(target)
except (TypeError, ValueError):
from astropy.units.physical import get_physical_type
try:
unit = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise TypeError(
"unit annotation is not a Unit or PhysicalType"
) from None
# Without Annotated support (python 3.8- / no typing_extensions), do not bail
# out; instead, return the unit so that `quantity_input` can still use it.
if not HAS_ANNOTATED:
warnings.warn(
"Quantity annotations are valid static type annotations only"
" if Python is v3.9+ or `typing_extensions` is installed."
)
return unit
# Quantity does not (yet) properly extend the NumPy generic types introduced
# in numpy v1.22+; instead, it just includes the unit info as metadata using
# Annotated.
# TODO: ensure we do interact with NDArray.__class_getitem__.
return Annotated.__class_getitem__((cls, unit))
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# inexact -> upcast to float dtype
float_default = dtype is np.inexact
if float_default:
dtype = None
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and isinstance(value, cls)):
value = value.view(cls)
if float_default and value.dtype.kind in "iu":
dtype = float
return np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
# the second part adds possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (
r"\s*[+-]?"
r"((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|"
r"([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))"
r"([eE][+-]?\d+)?"
r"[.+-]?"
)
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError(
f'Cannot parse "{value}" as a {cls.__name__}. It does not '
"start with a number."
)
unit_string = v.string[v.end() :].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif isiterable(value) and len(value) > 0:
# Iterables like lists and tuples.
if all(isinstance(v, Quantity) for v in value):
# If a list/tuple containing only quantities, convert all
# to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
elif (
dtype is None
and not hasattr(value, "dtype")
and isinstance(unit, StructuredUnit)
):
# Special case for list/tuple of values and a structured unit:
# ``np.array(value, dtype=None)`` would treat tuples as lower
# levels of the array, rather than as elements of a structured
# array, so we use the structure of the unit to help infer the
# structured dtype of the value.
dtype = unit._recursively_get_dtype(value)
using_default_unit = False
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
using_default_unit = True
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError(
f"The unit attribute {value.unit!r} of the input could "
"not be parsed as an astropy Unit."
) from exc
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# For no-user-input unit, make sure the constructed unit matches the
# structure of the data.
if using_default_unit and value.dtype.names is not None:
unit = value_unit = _structured_unit_like_dtype(value_unit, value.dtype)
# check that the array contains numbers or (arbitrary-precision) Python int objects
if value.dtype.kind in "OSU" and not (
value.dtype.kind == "O" and isinstance(value.item(0), numbers.Number)
):
raise TypeError("The value must be a valid Python or Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if float_default and value.dtype.kind in "iuO":
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, "_quantity_class", cls)
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# Check whether super().__array_finalize should be called
# (sadly, ndarray.__array_finalize__ is None; we cannot be sure
# what is above us).
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, "_unit", None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if "info" in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() create a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError(
"__array_wrap__ should not be used with a context any more since all "
"use should go through array_function. Please raise an issue on "
"https://github.com/astropy/astropy"
)
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity` or `NotImplemented`
Results of the ufunc, with the unit set properly.
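Examples
--------
An illustration of automatic unit handling in ufunc calls (values are
arbitrary):
>>> import numpy as np
>>> import astropy.units as u
>>> np.add(1. * u.m, 50. * u.cm)
<Quantity 1.5 m>
>>> np.sin(90 * u.deg)
<Quantity 1.>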
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
try:
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get("out", None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs["out"] = (out_array,) if function.nout == 1 else out_array
if method == "reduce" and "initial" in kwargs and unit is not None:
# Special-case for initial argument for reductions like
# np.add.reduce. This should be converted to the output unit as
# well, which is typically the same as the input unit (but can
# in principle be different: unitless for np.equal, radian
# for np.arctan2, though those are not necessarily useful!)
kwargs["initial"] = self._to_own_unit(
kwargs["initial"], check_precision=False, unit=unit
)
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, "value", input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
except (TypeError, ValueError) as e:
out_normalized = kwargs.get("out", tuple())
inputs_and_outputs = inputs + out_normalized
ignored_ufunc = (
None,
np.ndarray.__array_ufunc__,
type(self).__array_ufunc__,
)
if not all(
getattr(type(io), "__array_ufunc__", None) in ignored_ufunc
for io in inputs_and_outputs
):
return NotImplemented
else:
raise e
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : ndarray or tuple thereof
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, (tuple, list)):
if out is None:
out = (None,) * len(result)
return result.__class__(
self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in zip(result, unit, out)
)
if out is None:
# View the result array as a Quantity with the proper unit.
return result if unit is None else self._new_view(result, unit)
elif isinstance(out, Quantity):
# For given Quantity output, just set the unit. We know the unit
# is not None and the output is of the correct Quantity subclass,
# as it was passed through check_output.
# (We cannot do this unconditionally, though, since it is possible
# for out to be ndarray and the unit to be dimensionless.)
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `~astropy.units.Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None):
"""
Create a Quantity view of some array-like input, and set the unit
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : unit-like, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
Returns
-------
view : `~astropy.units.Quantity` subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, "_quantity_class", Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalars in `.value`.)
# Note that for an ndarray input, the np.array call takes only about twice as
# long as the check ``obj.__class__ is np.ndarray``, so it is not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False, subok=True)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
if isinstance(self._unit, StructuredUnit) or isinstance(
unit, StructuredUnit
):
unit = StructuredUnit(unit, self.dtype)
else:
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict="silent")
if not isinstance(unit, (UnitBase, StructuredUnit)):
raise UnitTypeError(
f"{self.__class__.__name__} instances require normal units, "
f"not {unit.__class__} instances."
)
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
if not self.dtype.names or isinstance(self.unit, StructuredUnit):
# Standard path: let the unit do the work.
return self.unit.to(
unit, self.view(np.ndarray), equivalencies=equivalencies
)
else:
# The .to() method of a simple unit cannot convert a structured
# dtype, so we work around it, by recursing.
# TODO: deprecate this?
# Convert simple to Structured on initialization?
result = np.empty_like(self.view(np.ndarray))
for name in self.dtype.names:
result[name] = self[name]._to_value(unit, equivalencies)
return result
def to(self, unit, equivalencies=[], copy=True):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses)
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy
will only be made if necessary.
See also
--------
to_value : get the numerical value in a given unit.
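Examples
--------
Simple conversions (illustrative):
>>> import astropy.units as u
>>> q = 1.0 * u.km
>>> q.to(u.m)
<Quantity 1000. m>
>>> q.to(u.Hz, equivalencies=u.spectral())
<Quantity 299792.458 Hz>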
"""
unit = Unit(unit)
if copy:
# Avoid using to_value to ensure that we make a copy. We also
# don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies)
else:
# to_value only copies if necessary
value = self.to_value(unit, equivalencies)
return self._new_view(value, unit)
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : unit-like, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If not provided
or ``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : ndarray or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See also
--------
to : Get a new instance in a different unit.
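Examples
--------
Illustrative use; note that the result is a plain `~numpy.ndarray`:
>>> import astropy.units as u
>>> q = [1., 2.] * u.km
>>> q.to_value(u.m)
array([1000., 2000.])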
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
elif not self.dtype.names:
# For non-structured, we attempt a short-cut, where we just get
# the scale. If that is 1, we do not have to do anything.
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
else:
# For structured arrays, we go the default route.
value = self._to_value(unit, equivalencies)
# Index with empty tuple to decay array scalars into numpy scalars.
return value if value.shape else value[()]
value = property(
to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""",
)
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
def _recursively_apply(self, func):
"""Apply function recursively to every field.
Returns a copy with the result.
"""
result = np.empty_like(self)
result_value = result.view(np.ndarray)
result_unit = ()
for name in self.dtype.names:
part = func(self[name])
result_value[name] = part.value
result_unit += (part.unit,)
result._set_unit(result_unit)
return result
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
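Examples
--------
A short illustration:
>>> import astropy.units as u
>>> (1. * u.km).si
<Quantity 1000. m>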
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("si"))
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale, si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("cgs"))
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale, cgs_unit / cgs_unit.scale)
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such as ``q.m``
# (equivalent to ``q.to_value(u.m)``), are available. This is not turned on
# for Quantity itself, but is on some subclasses of Quantity, such as
# `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return super().__dir__()
dir_values = set(super().__dir__())
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(equivalencies):
dir_values.update(equivalent.names)
return sorted(dir_values)
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
f"'{self.__class__.__name__}' object has no '{attr}' member"
)
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies
)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
f"{self.__class__.__name__} instance has no attribute '{attr}'"
)
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting, and does not
# deal well with structured arrays (nor does the ufunc).
def __eq__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return False
except Exception:
return NotImplemented
return self.value.__eq__(other_value)
def __ne__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return True
except Exception:
return NotImplemented
return self.value.__ne__(other_value)
# Unit conversion operator (<<).
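# For example (an illustrative sketch; ``u`` is ``astropy.units``):
#   >>> q = 1.0 * u.km
#   >>> q << u.m            # returns a new Quantity converted to m
#   <Quantity 1000. m>
#   >>> q <<= u.m           # converts q in place by rescaling its data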
def __lshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented # try other.__rlshift__(self)
try:
factor = self.unit._to(other)
except UnitConversionError: # incompatible, or requires an Equivalency
return NotImplemented
except AttributeError: # StructuredUnit does not have `_to`
# In principle, in-place might be possible.
return NotImplemented
view = self.view(np.ndarray)
try:
view *= factor # operates on view
except TypeError:
# The error is `numpy.core._exceptions._UFuncOutputCastingError`,
# which inherits from `TypeError`.
return NotImplemented
self._set_unit(other)
return self
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(
">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning,
)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
"""Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), other * self.unit)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
"""
Right Multiplication between `Quantity` objects and other objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
"""Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), self.unit / other)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
"""Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1.0 / self.value, other / self.unit)
return super().__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(self.value ** float(other), self.unit**other)
return super().__pow__(other)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value is not"
" iterable"
)
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
if isinstance(key, str) and isinstance(self.unit, StructuredUnit):
return self._new_view(self.view(np.ndarray)[key], self.unit[key])
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value "
"does not support indexing"
)
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if not isinstance(out, np.ndarray):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
if isinstance(i, str):
# Indexing by field name yields a part with a different unit, so by doing
# this in two steps we effectively set the value using the right unit.
self[i][...] = value
return
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and "info" in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""Quantities should always be treated as non-False; there is too much
potential for ambiguity otherwise.
"""
warnings.warn(
"The truth value of a Quantity is ambiguous. "
"In the future this will raise a ValueError.",
AstropyDeprecationWarning,
)
return True
def __len__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value has no len()"
)
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError(
"only integer dimensionless scalar quantities "
"can be converted to a Python index"
)
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = " " + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : unit-like, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : number, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string that uses
negative exponents instead of fractions
subfmt : str, optional
Subformat of the result. For the moment, only used for
``format='latex'`` and ``format='latex_inline'``. Supported
values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
str
A string with the contents of this Quantity
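Examples
--------
Basic use (the exact formatting follows numpy print options):
>>> import astropy.units as u
>>> q = 15.5 * u.m / u.s
>>> q.to_string()
'15.5 m / s'
>>> q.to_string(unit=u.cm / u.s)
'1550.0 cm / s'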
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt
)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
formats["latex_inline"] = formats["latex"]
if format not in formats:
raise ValueError(f"Unknown format '{format}'")
elif format is None:
if precision is None:
# Use default formatting settings
return f"{self.value}{self._unitstr:s}"
else:
# np.array2string properly formats arrays as well as scalars
return (
np.array2string(self.value, precision=precision, floatmode="fixed")
+ self._unitstr
)
# else, for the moment we assume format="latex" or "latex_inline".
# Set the precision if set, otherwise use numpy default
pops = np.get_printoptions()
format_spec = f".{precision if precision is not None else pops['precision']}g"
def float_formatter(value):
return Latex.format_exponential_notation(value, format_spec=format_spec)
def complex_formatter(value):
return "({}{}i)".format(
Latex.format_exponential_notation(value.real, format_spec=format_spec),
Latex.format_exponential_notation(
value.imag, format_spec="+" + format_spec
),
)
# The view is needed for the scalar case - self.value might be float.
latex_value = np.array2string(
self.view(np.ndarray),
threshold=(
conf.latex_array_threshold
if conf.latex_array_threshold > -1
else pops["threshold"]
),
formatter={
"float_kind": float_formatter,
"complex_kind": complex_formatter,
},
max_line_width=np.inf,
separator=",~",
)
latex_value = latex_value.replace("...", r"\dots")
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
if self.unit is None:
latex_unit = _UNIT_NOT_INITIALISED
elif format == "latex":
latex_unit = self.unit._repr_latex_()[1:-1] # note this is unicode
elif format == "latex_inline":
latex_unit = self.unit.to_string(format="latex_inline")[1:-1]
delimiter_left, delimiter_right = formats[format][subfmt]
return rf"{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}"
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = "<" + self.__class__.__name__ + " "
arrstr = np.array2string(
self.view(np.ndarray), separator=", ", prefix=prefixstr
)
return f"{prefixstr}{arrstr}{self._unitstr:s}>"
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format="latex", subfmt="inline")
def __format__(self, format_spec):
try:
return self.to_string(format=format_spec)
except ValueError:
# We might have a unit format not implemented in `to_string()`.
if format_spec in Base.registry:
if self.unit is dimensionless_unscaled:
return f"{self.value}"
else:
return f"{self.value} {format(self.unit, format_spec)}"
# Can the value be formatted on its own?
try:
return f"{format(self.value, format_spec)}{self._unitstr:s}"
except ValueError:
# Format the whole thing as a single string.
return format(f"{self.value}{self._unitstr:s}", format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
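Examples
--------
A short illustration:
>>> import astropy.units as u
>>> (1. * u.N).decompose()
<Quantity 1. kg m / s2>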
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, "scale"):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
"""Copy an element of an array to a scalar Quantity and return it.
Like :meth:`~numpy.ndarray.item` except that it always
returns a `Quantity`, not a Python scalar.
"""
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError(
"cannot make a list of Quantities. Get list of values with"
" q.value.tolist()."
)
def _to_own_unit(self, value, check_precision=True, *, unit=None):
"""Convert value to one's own unit (or that given).
Here, non-quantities are treated as dimensionless, and care is taken
for values of 0, infinity or nan, which are allowed to have any unit.
Parameters
----------
value : anything convertible to `~astropy.units.Quantity`
The value to be converted to the requested unit.
check_precision : bool
Whether to forbid conversion of float to integer if that changes
the input number. Default: `True`.
unit : `~astropy.units.Unit` or None
The unit to convert to. By default, the unit of ``self``.
Returns
-------
value : number or `~numpy.ndarray`
In the requested units.
"""
if unit is None:
unit = self.unit
try:
_value = value.to_value(unit)
except AttributeError:
# We're not a Quantity.
# First remove two special cases (with a fast test):
# 1) Maybe masked printing? MaskedArray with quantities does not
# work very well, but no reason to break even repr and str.
# 2) np.ma.masked? useful if we're a MaskedQuantity.
if value is np.ma.masked or (
value is np.ma.masked_print_option and self.dtype.kind == "O"
):
return value
# Now, let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
as_quantity = Quantity(value)
_value = as_quantity.to_value(unit)
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if not hasattr(value, "unit") and can_have_arbitrary_unit(
as_quantity.value
):
_value = as_quantity.value
else:
raise
if self.dtype.kind == "i" and check_precision:
# If, e.g., we are casting float to int, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False, subok=True)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype, subok=True)
if not np.all((self_dtype_array == _value) | np.isnan(_value)):
raise TypeError(
"cannot convert value type to array type without precision loss"
)
# Setting names to ensure things like equality work (note that
# above will have failed already if units did not match).
if self.dtype.names:
_value.dtype.names = self.dtype.names
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] + (self._to_own_unit(args[-1]),)))
def tostring(self, order="C"):
raise NotImplementedError(
"cannot write Quantities to string. Write array with"
" q.value.tostring(...)."
)
def tobytes(self, order="C"):
raise NotImplementedError(
"cannot write Quantities to bytes. Write array with q.value.tobytes(...)."
)
def tofile(self, fid, sep="", format="%s"):
raise NotImplementedError(
"cannot write Quantities to file. Write array with q.value.tofile(...)"
)
def dump(self, file):
raise NotImplementedError(
"cannot dump Quantities to file. Write array with q.value.dump()"
)
def dumps(self):
raise NotImplementedError(
"cannot dump Quantities to string. Write array with q.value.dumps()"
)
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode="raise"):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode="raise"):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode="raise"):
raise NotImplementedError(
"cannot choose based on quantity. Choose using array with"
" q.value.choose(...)"
)
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind="quicksort", order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(
np.array(self), self._to_own_unit(v, check_precision=False), *args, **kwargs
) # avoid numpy 1.6 problem
if NUMPY_LT_1_22:
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
else:
def argmax(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmax(axis=axis, out=out, keepdims=keepdims)
def argmin(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmin(axis=axis, out=out, keepdims=keepdims)
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions, taking care of units.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
Returns
-------
result: `~astropy.units.Quantity`, `~numpy.ndarray`
As appropriate for the function. If the function is not
supported, `NotImplemented` is returned, which will lead to
a `TypeError` unless another argument overrode the function.
Raises
------
~astropy.units.UnitsError
If operands have incompatible units.
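Examples
--------
A sketch of a numpy function dispatching through this protocol (values are
arbitrary):
>>> import numpy as np
>>> import astropy.units as u
>>> np.mean([1., 2., 3.] * u.s)
<Quantity 2. s>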
"""
# A function should be in one of the following sets or dicts:
# 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
# supports Quantity; we pass on to ndarray.__array_function__.
# 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
# after converting quantities to arrays with suitable units,
# and possibly setting units on the result.
# 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
# requires a Quantity-specific implementation.
# 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
# For now, since we may not yet have complete coverage, if a
# function is in none of the above, we simply call the numpy
# implementation.
if function in SUBCLASS_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in FUNCTION_HELPERS:
function_helper = FUNCTION_HELPERS[function]
try:
args, kwargs, unit, out = function_helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
result = super().__array_function__(function, types, args, kwargs)
# Fall through to return section
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
result, unit, out = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
# Fall through to return section
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
warnings.warn(
f"function '{function.__name__}' is not known to astropy's Quantity."
" Will run it anyway, hoping it will treat ndarray subclasses"
" correctly. Please raise an issue at"
" https://github.com/astropy/astropy/issues.",
AstropyWarning,
)
return super().__array_function__(function, types, args, kwargs)
# If unit is None, a plain array is expected (e.g., boolean), which
# means we're done.
# We're also done if the result was NotImplemented, which can happen
# if other inputs/outputs override __array_function__;
# hopefully, they can then deal with us.
if unit is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out=out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(
issubclass(t, np.ndarray) and not issubclass(t, Quantity) for t in types
):
raise TypeError(
f"the Quantity implementation cannot handle {function} "
"with the given arguments."
) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple(
(arg.value if isinstance(arg, Quantity) else arg) for arg in args
)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs["out"] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype, out=out)
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.var,
axis,
dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
where=where,
unit=self.unit**2,
)
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.std, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
return self._wrap_function(
np.mean, axis, dtype, out=out, keepdims=keepdims, where=where
)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, "unit", dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)"
)
def any(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)"
)
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
if NUMPY_LT_1_22:
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis, out=out, keepdims=keepdims)
else:
# TODO: deprecate this method? It is not on ndarray, and we do not
# support nanmean, etc., so why this one?
def nansum(
self, axis=None, out=None, keepdims=False, *, initial=None, where=True
):
if initial is not None:
initial = self._to_own_unit(initial)
return self._wrap_function(
np.nansum,
axis,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`.
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
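Examples
--------
A minimal subclass sketch (``Wavelength`` is a made-up example, not an
astropy class):
>>> import astropy.units as u
>>> class Wavelength(u.SpecificTypeQuantity):
...     _equivalent_unit = u.nm
>>> Wavelength(500 * u.nm)
<Wavelength 500. nm>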
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{} instances require units equivalent to '{}'".format(
type(self).__name__, self._equivalent_unit
)
+ (
", but no unit was given."
if unit is None
else f", so cannot set it to '{unit}'."
)
)
super()._set_unit(unit)
def isclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs):
"""
Return a boolean array where two arrays are element-wise equal
within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See also
--------
allclose
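Examples
--------
A simple illustration of unit-aware, element-wise comparison:

>>> from astropy import units as u
>>> u.isclose([1, 2] * u.m, [100, 201] * u.cm)
array([ True, False])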
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def allclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs) -> bool:
"""
Whether two arrays are element-wise equal within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See also
--------
isclose
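Examples
--------
A simple illustration of unit-aware comparison of whole arrays:

>>> from astropy import units as u
>>> u.allclose([1, 2] * u.m, [100, 200] * u.cm)
True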
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'desired' ({desired.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
if atol is None:
# By default, we assume an absolute tolerance of zero in the
# appropriate units. The default value of None for atol is
# needed because the units of atol must be consistent with the
# units for a and b.
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'atol' ({atol.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("'rtol' should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
4874886180260bbe601de476358369b0fc496d5e4bbf0ead3f6eae5fd26e776a
import copy
import operator
import re
import warnings
import erfa
import numpy as np
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.table import QTable
from astropy.time import Time
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle
from .baseframe import BaseCoordinateFrame, GenericFrame, frame_transform_graph
from .distances import Distance
from .representation import (
RadialDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from .sky_coordinate_parsers import (
_get_frame_class,
_get_frame_without_data,
_parse_coordinate_data,
)
__all__ = ["SkyCoord", "SkyCoordInfo"]
class SkyCoordInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {"unit"} # Unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
repr_data = val.info._repr_data
formats = ["{0." + compname + ".value:}" for compname in repr_data.components]
return ",".join(formats).format(repr_data)
@property
def unit(self):
repr_data = self._repr_data
unit = ",".join(
str(getattr(repr_data, comp).unit) or "None"
for comp in repr_data.components
)
return unit
@property
def _repr_data(self):
if self._parent is None:
return None
sc = self._parent
if issubclass(sc.representation_type, SphericalRepresentation) and isinstance(
sc.data, UnitSphericalRepresentation
):
repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)
else:
repr_data = sc.represent_as(sc.representation_type, in_frame_units=True)
return repr_data
def _represent_as_dict(self):
sc = self._parent
attrs = list(sc.representation_component_names)
# Don't output distance unless it's actually distance.
if isinstance(sc.data, UnitSphericalRepresentation):
attrs = attrs[:-1]
diff = sc.data.differentials.get("s")
if diff is not None:
diff_attrs = list(sc.get_representation_component_names("s"))
# Don't output proper motions if they haven't been specified.
if isinstance(diff, RadialDifferential):
diff_attrs = diff_attrs[2:]
# Don't output radial velocity unless it's actually velocity.
elif isinstance(
diff, (UnitSphericalDifferential, UnitSphericalCosLatDifferential)
):
diff_attrs = diff_attrs[:-1]
attrs.extend(diff_attrs)
attrs.extend(frame_transform_graph.frame_attributes.keys())
out = super()._represent_as_dict(attrs)
out["representation_type"] = sc.representation_type.get_name()
out["frame"] = sc.frame.name
# Note that sc.info.unit is a fake composite unit (e.g. 'deg,deg,None'
# or None,None,m) and is not stored. The individual attributes have
# units.
return out
def new_like(self, skycoords, length, metadata_conflicts="warn", name=None):
"""
Return a new SkyCoord instance which is consistent with the input
SkyCoord objects ``skycoords`` and has ``length`` rows. Being
"consistent" is defined as being able to set an item from one to each of
the rest without any exception being raised.
This is intended for creating a new SkyCoord instance whose elements can
be set in-place for table operations like join or vstack. This is used
when a SkyCoord object is used as a mixin column in an astropy Table.
The data values are not predictable and it is expected that the consumer
of the object will fill in all values.
Parameters
----------
skycoords : list
List of input SkyCoord objects
length : int
Length of the output skycoord object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output name (sets output skycoord.info.name)
Returns
-------
skycoord : |SkyCoord| (or subclass)
Instance of this class consistent with ``skycoords``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
skycoords, metadata_conflicts, name, ("meta", "description")
)
skycoord0 = skycoords[0]
# Make a new SkyCoord object with the desired length and attributes
# by using the _apply / __getitem__ machinery to effectively return
# skycoord0[[0, 0, ..., 0, 0]]. This will have all the right frame
# attributes with the right shape.
indexes = np.zeros(length, dtype=np.int64)
out = skycoord0[indexes]
# Use __setitem__ machinery to check for consistency of all skycoords
for skycoord in skycoords[1:]:
try:
out[0] = skycoord[0]
except Exception as err:
raise ValueError("Input skycoords are inconsistent.") from err
# Set (merged) info attributes
for attr in ("name", "meta", "description"):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
class SkyCoord(ShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The |SkyCoord| class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
See also: https://docs.astropy.org/en/stable/coordinates/
Examples
--------
The examples below illustrate common ways of initializing a |SkyCoord|
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +31:12:43", "1 12 43.2 +31 12 43"]
>>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic) # Units from string
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias -- lower-case versions of the
class name that allow for creating a |SkyCoord| object and transforming
frames without explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this |SkyCoord| should represent. Defaults to
ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied coordinate values.
If only one unit is supplied then it applies to all values.
Note that passing only one unit might lead to unit conversion errors
if the coordinate values are expected to have mixed physical meanings
(e.g., angles and distances).
obstime : time-like, optional
Time(s) of observation.
equinox : time-like, optional
Coordinate frame equinox time.
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : angle-like, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including ``ICRS``,
``FK5``, ``FK4``, and ``FK4NoETerms``.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components, in angle per time units.
l, b : angle-like, optional
Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the ``Galactic`` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components in the `~astropy.coordinates.Galactic` frame,
in angle per time units.
x, y, z : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values
u, v, w : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
def __init__(self, *args, copy=True, **kwargs):
# these are frame attributes set on this SkyCoord but *not* a part of
# the frame object this SkyCoord contains
self._extra_frameattr_names = set()
# If all that is passed in is a frame instance that already has data,
# we should bypass all of the parsing and logic below. This is here
# to make this the fastest way to create a SkyCoord instance. Many of
# the classmethods implemented for performance enhancements will use
# this as the initialization path
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))
):
coords = args[0]
if isinstance(coords, SkyCoord):
self._extra_frameattr_names = coords._extra_frameattr_names
self.info = coords.info
# Copy over any extra frame attributes
for attr_name in self._extra_frameattr_names:
# Setting it will also validate it.
setattr(self, attr_name, getattr(coords, attr_name))
coords = coords.frame
if not coords.has_data:
raise ValueError(
"Cannot initialize from a coordinate frame "
"instance without coordinate data"
)
if copy:
self._sky_coord_frame = coords.copy()
else:
self._sky_coord_frame = coords
else:
# Get the frame instance without coordinate data but with all frame
# attributes set - these could either have been passed in with the
# frame as an instance, or passed in as kwargs here
frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)
# Parse the args and kwargs to assemble a sanitized and validated
# kwargs dict for initializing attributes for this object and for
# creating the internal self._sky_coord_frame object
args = list(args) # Make it mutable
skycoord_kwargs, components, info = _parse_coordinate_data(
frame_cls(**frame_kwargs), args, kwargs
)
# In the above two parsing functions, these kwargs were identified
# as valid frame attributes for *some* frame, but not the frame that
# this SkyCoord will have. We keep these attributes as special
# skycoord frame attributes:
for attr in skycoord_kwargs:
# Setting it will also validate it.
setattr(self, attr, skycoord_kwargs[attr])
if info is not None:
self.info = info
# Finally make the internal coordinate object.
frame_kwargs.update(components)
self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)
if not self._sky_coord_frame.has_data:
raise ValueError("Cannot create a SkyCoord without data")
@property
def frame(self):
return self._sky_coord_frame
@property
def representation_type(self):
return self.frame.representation_type
@representation_type.setter
def representation_type(self, value):
self.frame.representation_type = value
# TODO: remove these in future
@property
def representation(self):
return self.frame.representation
@representation.setter
def representation(self, value):
self.frame.representation = value
@property
def shape(self):
return self.frame.shape
def __eq__(self, value):
"""Equality operator for SkyCoord
This implements strict equality and requires that the frames are
equivalent, extra frame attributes are equivalent, and that the
representation data are exactly equal.
"""
if isinstance(value, BaseCoordinateFrame):
if value._data is None:
raise ValueError("Can only compare SkyCoord to Frame with data")
return self.frame == value
if not isinstance(value, SkyCoord):
return NotImplemented
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(
getattr(self, attr), getattr(value, attr)
):
raise ValueError(
f"cannot compare: extra frame attribute '{attr}' is not equivalent"
" (perhaps compare the frames directly to avoid this exception)"
)
return self._sky_coord_frame == value._sky_coord_frame
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
# create a new but empty instance, and copy over stuff
new = super().__new__(self.__class__)
new._sky_coord_frame = self._sky_coord_frame._apply(method, *args, **kwargs)
new._extra_frameattr_names = self._extra_frameattr_names.copy()
for attr in self._extra_frameattr_names:
value = getattr(self, attr)
if getattr(value, "shape", ()):
value = apply_method(value)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, "_" + attr, value)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
"""Implement self[item] = value for SkyCoord
The right hand ``value`` must be strictly consistent with self:
- Identical class
- Equivalent frames
- Identical representation_types
- Identical representation differentials keys
- Identical frame attributes
- Identical "extra" frame attributes (e.g. obstime for an ICRS coord)
With these caveats the setitem ends up as effectively a setitem on
the representation data.
self.frame.data[item] = value.frame.data
"""
if self.__class__ is not value.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(
getattr(self, attr), getattr(value, attr)
):
raise ValueError(f"attribute {attr} is not equivalent")
# Set the frame values. This checks frame equivalence and also clears
# the cache to ensure that the object is not in an inconsistent state.
self._sky_coord_frame[item] = value._sky_coord_frame
def insert(self, obj, values, axis=0):
"""
Insert coordinate values before the given indices in the object and
return a new |SkyCoord| object.
The values to be inserted must conform to the rules for in-place setting
of |SkyCoord| objects.
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple insertion before the index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.coordinates.SkyCoord` instance
New coordinate object with inserted value(s)
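Examples
--------
An illustrative sketch (the values are arbitrary)::

    sc = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
    new = sc.insert(1, SkyCoord(10 * u.deg, 20 * u.deg))
    # ``new`` has three entries, with the inserted coordinate at index 1.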
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
# Set the output values. This is where validation of `values` takes place to ensure
# that it can indeed be inserted.
out[:idx0] = self[:idx0]
out[idx0 : idx0 + n_values] = values
out[idx0 + n_values :] = self[idx0:]
return out
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : frame class, frame object, or str
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
# TODO! like matplotlib, do string overrides for modified methods
new_frame = (
_get_frame_class(new_frame) if isinstance(new_frame, str) else new_frame
)
return self.frame.is_transformable_to(new_frame)
def transform_to(self, frame, merge_attributes=True):
"""Transform this coordinate to a new frame.
The precise frame transformed to depends on ``merge_attributes``.
If `False`, the destination frame is used exactly as passed in.
But this is often not quite what one wants. E.g., suppose one wants to
transform an ICRS coordinate that has an obstime attribute to FK4; in
this case, one likely would want to use this information. Thus, the
default for ``merge_attributes`` is `True`, in which the precedence is
as follows: (1) explicitly set (i.e., non-default) values in the
destination frame; (2) explicitly set values in the source; (3) default
value in the destination frame.
Note that in either case, any explicitly set attributes on the source
|SkyCoord| that are not part of the destination frame's definition are
kept (stored on the resulting |SkyCoord|), and thus one can round-trip
(e.g., from FK4 to ICRS to FK4 without losing obstime).
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` class or instance, or |SkyCoord| instance
The frame to transform this coordinate into. If a |SkyCoord|, the
underlying frame is extracted, and all other information ignored.
merge_attributes : bool, optional
Whether the default attributes in the destination frame are allowed
to be overridden by explicitly set attributes in the source
(see note above; default: `True`).
Returns
-------
coord : |SkyCoord|
A new object with this coordinate represented in the `frame` frame.
Raises
------
ValueError
If there is no possible transformation route.
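Examples
--------
A minimal illustration (the frame choices here are arbitrary)::

    from astropy.coordinates import SkyCoord, FK5
    import astropy.units as u

    c = SkyCoord(ra=10 * u.deg, dec=20 * u.deg, frame="icrs")
    c_gal = c.transform_to("galactic")             # by frame name
    c_fk5 = c.transform_to(FK5(equinox="J1975"))   # by frame instance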
"""
from astropy.coordinates.errors import ConvertError
frame_kwargs = {}
# Frame name (string) or frame class? Coerce into an instance.
try:
frame = _get_frame_class(frame)()
except Exception:
pass
if isinstance(frame, SkyCoord):
frame = frame.frame # Change to underlying coord frame instance
if isinstance(frame, BaseCoordinateFrame):
new_frame_cls = frame.__class__
# Get frame attributes, allowing defaults to be overridden by
# explicitly set attributes of the source if ``merge_attributes``.
for attr in frame_transform_graph.frame_attributes:
self_val = getattr(self, attr, None)
frame_val = getattr(frame, attr, None)
if frame_val is not None and not (
merge_attributes and frame.is_frame_attr_default(attr)
):
frame_kwargs[attr] = frame_val
elif self_val is not None and not self.is_frame_attr_default(attr):
frame_kwargs[attr] = self_val
elif frame_val is not None:
frame_kwargs[attr] = frame_val
else:
raise ValueError(
"Transform `frame` must be a frame name, class, or instance"
)
# Get the composite transform to the new frame
trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
if trans is None:
raise ConvertError(
f"Cannot transform from {self.frame.__class__} to {new_frame_cls}"
)
# Make a generic frame which will accept all the frame kwargs that
# are provided and allow for transforming through intermediate frames
# which may require one or more of those kwargs.
generic_frame = GenericFrame(frame_kwargs)
# Do the transformation, returning a coordinate frame of the desired
# final type (not generic).
new_coord = trans(self.frame, generic_frame)
# Finally make the new SkyCoord object from the `new_coord` and
# remaining frame_kwargs that are not frame_attributes in `new_coord`.
for attr in set(new_coord.frame_attributes) & set(frame_kwargs.keys()):
frame_kwargs.pop(attr)
# Always remove the origin frame attribute, as that attribute only makes
# sense with a SkyOffsetFrame (in which case it will be stored on the frame).
# See gh-11277.
# TODO: Should it be a property of the frame attribute that it can
# or cannot be stored on a SkyCoord?
frame_kwargs.pop("origin", None)
return self.__class__(new_coord, **frame_kwargs)
def apply_space_motion(self, new_obstime=None, dt=None):
"""
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation."
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : |SkyCoord|
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
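Examples
--------
An illustrative sketch (all numbers are arbitrary)::

    from astropy.coordinates import SkyCoord
    from astropy.time import Time
    import astropy.units as u

    c = SkyCoord(ra=10 * u.deg, dec=20 * u.deg,
                 pm_ra_cosdec=50 * u.mas / u.yr, pm_dec=-30 * u.mas / u.yr,
                 obstime=Time("2015-01-01"))
    c_evolved = c.apply_space_motion(new_obstime=Time("2020-01-01"))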
"""
from .builtin_frames.icrs import ICRS
if (new_obstime is None) == (dt is None):
raise ValueError(
"You must specify one of `new_obstime` or `dt`, but not both."
)
# Validate that we have velocity info
if "s" not in self.frame.data.differentials:
raise ValueError("SkyCoord requires velocity data to evolve the position.")
if "obstime" in self.frame.frame_attributes:
raise NotImplementedError(
"Updating the coordinates in a frame with explicit time dependence is"
" currently not supported. If you would like this functionality, please"
" open an issue on github:\nhttps://github.com/astropy/astropy"
)
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
# position / velocity were measured initially
raise ValueError(
"This object has no associated `obstime`. apply_space_motion() must"
" receive a time difference, `dt`, and not a new obstime."
)
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time("J2000")
new_obstime = None # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials["s"]
parallax_zero = False
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by convention
plx = 0.0
parallax_zero = True
try:
rv = icrsvel.d_distance.to_value(u.km / u.s)
except u.UnitConversionError: # No RV
rv = 0.0
starpm = erfa.pmsafe(
icrsrep.lon.radian,
icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian / u.yr),
icrsvel.d_lat.to_value(u.radian / u.yr),
plx,
rv,
t1.jd1,
t1.jd2,
t2.jd1,
t2.jd2,
)
if parallax_zero:
new_distance = None
else:
new_distance = Distance(parallax=starpm[4] << u.arcsec)
icrs2 = ICRS(
ra=u.Quantity(starpm[0], u.radian, copy=False),
dec=u.Quantity(starpm[1], u.radian, copy=False),
pm_ra=u.Quantity(starpm[2], u.radian / u.yr, copy=False),
pm_dec=u.Quantity(starpm[3], u.radian / u.yr, copy=False),
distance=new_distance,
radial_velocity=u.Quantity(starpm[5], u.km / u.s, copy=False),
differential_type=SphericalDifferential,
)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {
attrnm: getattr(self, attrnm) for attrnm in self._extra_frameattr_names
}
frattrs["obstime"] = new_obstime
result = self.__class__(icrs2, **frattrs).transform_to(self.frame)
# Without this the output might not have the right differential type.
# Not sure if this fixes the problem or just hides it. See #11932
result.differential_type = self.differential_type
return result
def _is_name(self, string):
"""
Returns whether a string is one of the aliases for the frame.
"""
return self.frame.name == string or (
isinstance(self.frame.name, list) and string in self.frame.name
)
def __getattr__(self, attr):
"""
Overrides getattr to return coordinates that this can be transformed
to, based on the alias attr in the primary transform graph.
"""
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
return self # Should this be a deepcopy of self?
# Anything in the set of all possible frame_attr_names is handled
# here. If the attr is relevant for the current frame then delegate
# to self.frame otherwise get it from self._<attr>.
if attr in frame_transform_graph.frame_attributes:
if attr in self.frame.frame_attributes:
return getattr(self.frame, attr)
else:
return getattr(self, "_" + attr, None)
# Some attributes might not fall in the above category but still
# are available through self._sky_coord_frame.
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
return getattr(self._sky_coord_frame, attr)
# Try to interpret as a new frame for transforming.
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
return self.transform_to(attr)
# Call __getattribute__; this will give correct exception.
return self.__getattribute__(attr)
def __setattr__(self, attr, val):
# This is to make anything available through __getattr__ immutable
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
setattr(self._sky_coord_frame, attr, val)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be set, but only via a private
# variable. See __getattr__ above.
super().__setattr__("_" + attr, val)
# Validate it
frame_transform_graph.frame_attributes[attr].__get__(self)
# And add to set of extra attributes
self._extra_frameattr_names |= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__setattr__(attr, val)
def __delattr__(self, attr):
# mirror __setattr__ above
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
delattr(self._sky_coord_frame, attr)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be deleted, but need to remove
# the corresponding private variable. See __getattr__ above.
super().__delattr__("_" + attr)
# Also remove it from the set of extra attributes
self._extra_frameattr_names -= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__delattr__(attr)
def __dir__(self):
"""
Override the builtin `dir` behavior to include:
- Transforms available by aliases
- Attribute / methods of the underlying self.frame object
"""
dir_values = set(super().__dir__())
# determine the aliases that this can be transformed to.
for name in frame_transform_graph.get_names():
frame_cls = frame_transform_graph.lookup_name(name)
if self.frame.is_transformable_to(frame_cls):
dir_values.add(name)
# Add public attributes of self.frame
dir_values.update(
{attr for attr in dir(self.frame) if not attr.startswith("_")}
)
# Add all possible frame attributes
dir_values.update(frame_transform_graph.frame_attributes.keys())
return sorted(dir_values)
def __repr__(self):
clsnm = self.__class__.__name__
coonm = self.frame.__class__.__name__
frameattrs = self.frame._frame_attrs_repr()
if frameattrs:
frameattrs = ": " + frameattrs
data = self.frame._data_repr()
if data:
data = ": " + data
return f"<{clsnm} ({coonm}{frameattrs}){data}>"
def to_string(self, style="decimal", **kwargs):
"""
A string representation of the coordinates.
The default styles definitions are::
'decimal': 'lat': {'decimal': True, 'unit': "deg"}
'lon': {'decimal': True, 'unit': "deg"}
'dms': 'lat': {'unit': "deg"}
'lon': {'unit': "deg"}
'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
'lon': {'pad': True, 'unit': "hour"}
See :meth:`~astropy.coordinates.Angle.to_string` for details and
keyword arguments (the two angles forming the coordinates are
both :class:`~astropy.coordinates.Angle` instances). Keyword
arguments have precedence over the style defaults and are passed
to :meth:`~astropy.coordinates.Angle.to_string`.
Parameters
----------
style : {'hmsdms', 'dms', 'decimal'}
The formatting specification to use. These encode the three most
common ways to represent coordinates. The default is ``'decimal'``.
**kwargs
Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
"""
sph_coord = self.frame.represent_as(SphericalRepresentation)
styles = {
"hmsdms": {
"lonargs": {"unit": u.hour, "pad": True},
"latargs": {"unit": u.degree, "pad": True, "alwayssign": True},
},
"dms": {"lonargs": {"unit": u.degree}, "latargs": {"unit": u.degree}},
"decimal": {
"lonargs": {"unit": u.degree, "decimal": True},
"latargs": {"unit": u.degree, "decimal": True},
},
}
lonargs = {}
latargs = {}
if style in styles:
lonargs.update(styles[style]["lonargs"])
latargs.update(styles[style]["latargs"])
else:
raise ValueError(f"Invalid style. Valid options are: {','.join(styles)}")
lonargs.update(kwargs)
latargs.update(kwargs)
if np.isscalar(sph_coord.lon.value):
coord_string = (
f"{sph_coord.lon.to_string(**lonargs)}"
f" {sph_coord.lat.to_string(**latargs)}"
)
else:
coord_string = []
for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
coord_string += [
f"{lonangle.to_string(**lonargs)} {latangle.to_string(**latargs)}"
]
if len(sph_coord.shape) > 1:
coord_string = np.array(coord_string).reshape(sph_coord.shape)
return coord_string
def to_table(self):
"""
Convert this |SkyCoord| to a |QTable|.
Any attributes that have the same length as the |SkyCoord| will be
converted to columns of the |QTable|. All other attributes will be
recorded as metadata.
Returns
-------
`~astropy.table.QTable`
A |QTable| containing the data of this |SkyCoord|.
Examples
--------
>>> sc = SkyCoord(ra=[40, 70]*u.deg, dec=[0, -20]*u.deg,
... obstime=Time([2000, 2010], format='jyear'))
>>> t = sc.to_table()
>>> t
<QTable length=2>
ra dec obstime
deg deg
float64 float64 Time
------- ------- -------
40.0 0.0 2000.0
70.0 -20.0 2010.0
>>> t.meta
{'representation_type': 'spherical', 'frame': 'icrs'}
"""
self_as_dict = self.info._represent_as_dict()
tabledata = {}
metadata = {}
# Record attributes that have the same length as self as columns in the
# table, and the other attributes as table metadata. This matches
# table.serialize._represent_mixin_as_column().
for key, value in self_as_dict.items():
if getattr(value, "shape", ())[:1] == (len(self),):
tabledata[key] = value
else:
metadata[key] = value
return QTable(tabledata, meta=metadata)
def is_equivalent_frame(self, other):
"""
Checks if this object's frame is the same as that of the ``other``
object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. For two |SkyCoord| objects, *all* of the
frame attributes have to match, not just those relevant for the object's
frame.
Parameters
----------
other : SkyCoord or BaseCoordinateFrame
The other object to check.
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a |SkyCoord| or a subclass of
`~astropy.coordinates.BaseCoordinateFrame`.
"""
if isinstance(other, BaseCoordinateFrame):
return self.frame.is_equivalent_frame(other)
elif isinstance(other, SkyCoord):
if other.frame.name != self.frame.name:
return False
for fattrnm in frame_transform_graph.frame_attributes:
if not BaseCoordinateFrame._frameattr_equiv(
getattr(self, fattrnm), getattr(other, fattrnm)
):
return False
return True
else:
# not a BaseCoordinateFrame nor a SkyCoord object
raise TypeError(
"Tried to do is_equivalent_frame on something that isn't frame-like"
)
# High-level convenience methods
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
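Examples
--------
A simple illustration with two points one degree apart on the equator:

>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> c1 = SkyCoord(0 * u.deg, 0 * u.deg)
>>> c2 = SkyCoord(1 * u.deg, 0 * u.deg)
>>> c1.separation(c2).degree  # doctest: +FLOAT_CMP
1.0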
"""
from . import Angle
from .angle_utilities import angular_separation
if not self.is_equivalent_frame(other):
try:
kwargs = (
{"merge_attributes": False} if isinstance(other, SkyCoord) else {}
)
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError(
"Can only get separation to another SkyCoord "
"or a coordinate frame with data"
)
lon1 = self.spherical.lon
lat1 = self.spherical.lat
lon2 = other.spherical.lon
lat2 = other.spherical.lat
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(lon1, lat1, lon2, lat2)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate does not have a distance.
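Examples
--------
An illustrative sketch (both coordinates need a ``distance``; the
values are arbitrary)::

    c1 = SkyCoord(10 * u.deg, 20 * u.deg, distance=10 * u.pc)
    c2 = SkyCoord(11 * u.deg, 21 * u.deg, distance=11 * u.pc)
    c1.separation_3d(c2)   # -> a Distance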
"""
if not self.is_equivalent_frame(other):
try:
kwargs = (
{"merge_attributes": False} if isinstance(other, SkyCoord) else {}
)
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError(
"Can only get separation to another SkyCoord "
"or a coordinate frame with data"
)
if issubclass(self.data.__class__, UnitSphericalRepresentation):
raise ValueError(
"This object does not have a distance; cannot compute 3d separation."
)
if issubclass(other.data.__class__, UnitSphericalRepresentation):
raise ValueError(
"The other object does not have a distance; "
"cannot compute 3d separation."
)
c1 = self.cartesian.without_differentials()
c2 = other.cartesian.without_differentials()
return Distance((c1 - c2).norm())
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to find the offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the `separation`/`separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
separation :
for the *total* angular offset (not broken out into components).
position_angle :
for the direction of the offset.
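Examples
--------
An illustrative sketch (both coordinates must be in the same frame;
the values are arbitrary)::

    c1 = SkyCoord(10 * u.deg, 20 * u.deg)
    c2 = SkyCoord(10.1 * u.deg, 20.2 * u.deg)
    dlon, dlat = c1.spherical_offsets_to(c2)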
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError(
"Tried to use spherical_offsets_to with two non-matching frames!"
)
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
def spherical_offsets_by(self, d_lon, d_lat):
"""
Computes the coordinate that is a specified pair of angular offsets away
from this coordinate.
Parameters
----------
d_lon : angle-like
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
d_lat : angle-like
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Returns
-------
newcoord : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
``d_lat`` in the latitude direction and ``d_lon`` in the longitude
direction.
Notes
-----
This internally uses `~astropy.coordinates.SkyOffsetFrame` to do the
transformation. For a more complete set of transform offsets, use
`~astropy.coordinates.SkyOffsetFrame` or `~astropy.wcs.WCS` manually.
This specific method can be reproduced by doing
``SkyCoord(SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self))``.
See Also
--------
spherical_offsets_to : compute the angular offsets to another coordinate
directional_offset_by : offset a coordinate by an angle in a direction
"""
from .builtin_frames.skyoffset import SkyOffsetFrame
return self.__class__(
SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self)
)
def directional_offset_by(self, position_angle, separation):
"""
Computes coordinates at the given offset from this coordinate.
Parameters
----------
position_angle : `~astropy.coordinates.Angle`
position_angle of offset
separation : `~astropy.coordinates.Angle`
offset angular separation
Returns
-------
newpoints : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
the given `position_angle` and `separation`.
Notes
-----
Returned SkyCoord frame retains only the frame attributes that are for
the resulting frame type. (e.g. if the input frame is
`~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but
an ``obstime`` will not.)
For a more complete set of transform offsets, use `~astropy.wcs.WCS`.
`~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to
create a spherical frame with (lat=0, lon=0) at a reference point,
approximating an xy cartesian system for small offsets. This method
is distinct in that it is accurate on the sphere.
See Also
--------
position_angle : inverse operation for the ``position_angle`` component
separation : inverse operation for the ``separation`` component
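Examples
--------
An illustrative sketch::

    c = SkyCoord(0 * u.deg, 0 * u.deg)
    # Offset by 1 degree due East (position angle of 90 degrees):
    c.directional_offset_by(90 * u.deg, 1 * u.deg)   # -> approximately (ra=1 deg, dec=0 deg)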
"""
from . import angle_utilities
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
newlon, newlat = angle_utilities.offset_by(
lon=slon, lat=slat, posang=position_angle, distance=separation
)
return SkyCoord(newlon, newlat, frame=self.frame)
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest on-sky matches of this coordinate in a set of
catalog coordinates.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is ``2``,
for matching a coordinate catalog against *itself* (``1``
is inappropriate because each point will find itself as the
closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object. Unless both this and ``catalogcoord`` have associated
distances, this quantity assumes that all sources are at a
distance of 1 (dimensionless).
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_sky
SkyCoord.match_to_catalog_3d
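Examples
--------
A minimal sketch (requires SciPy; the coordinate values are arbitrary)::

    catalog = SkyCoord(ra=[10, 11, 12] * u.deg, dec=[0, 1, 2] * u.deg)
    targets = SkyCoord(ra=[10.1, 11.9] * u.deg, dec=[0.1, 1.9] * u.deg)
    idx, sep2d, dist3d = targets.match_to_catalog_sky(catalog)
    # ``catalog[idx]`` gives the closest catalog match for each target.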
"""
from .matching import match_coordinates_sky
if not (
isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data
):
raise TypeError(
"Can only get separation to another SkyCoord or a "
"coordinate frame with data"
)
res = match_coordinates_sky(
self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_sky"
)
return res
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest 3-dimensional matches of this coordinate to a set
of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in this object or the
``catalogcoord`` object.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is
``2``, for matching a coordinate catalog against *itself*
(``1`` is inappropriate because each point will find
itself as the closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_3d
SkyCoord.match_to_catalog_sky
"""
from .matching import match_coordinates_3d
if not (
isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data
):
raise TypeError(
"Can only get separation to another SkyCoord or a "
"coordinate frame with data"
)
res = match_coordinates_3d(
self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_3d"
)
return res
def search_around_sky(self, searcharoundcoords, seplimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given on-sky separation.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : coordinate-like
The coordinates to search around to try to find matching points in
this |SkyCoord|. This should be an object with array coordinates,
not a scalar coordinate object.
seplimit : `~astropy.units.Quantity` ['angle']
The on-sky separation to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_sky
SkyCoord.search_around_3d
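Examples
--------
A minimal sketch (requires SciPy; ``coords`` and ``other_coords`` are
hypothetical array-valued |SkyCoord| objects)::

    idxsearcharound, idxself, sep2d, dist3d = coords.search_around_sky(
        other_coords, 1 * u.arcmin)
    # Each pair (idxsearcharound[i], idxself[i]) is a match within 1 arcmin.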
"""
from .matching import search_around_sky
return search_around_sky(
searcharoundcoords, self, seplimit, storekdtree="_kdtree_sky"
)
def search_around_3d(self, searcharoundcoords, distlimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given 3D radius.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation_3d`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this |SkyCoord|. This should be an object with array coordinates,
not a scalar coordinate object.
distlimit : `~astropy.units.Quantity` ['length']
The physical radius to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_3d
SkyCoord.search_around_sky
"""
from .matching import search_around_3d
return search_around_3d(
searcharoundcoords, self, distlimit, storekdtree="_kdtree_3d"
)
def position_angle(self, other):
"""
Computes the on-sky position angle (East of North) between this
SkyCoord and another.
Parameters
----------
other : |SkyCoord|
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``. If either ``self`` or ``other`` contain arrays, this
will be an array following the appropriate `numpy` broadcasting
rules.
Examples
--------
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).degree
90.0
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
from . import angle_utilities
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError(
"Can only get position_angle to another "
"SkyCoord or a coordinate frame with data"
)
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
olat = other.represent_as(UnitSphericalRepresentation).lat
olon = other.represent_as(UnitSphericalRepresentation).lon
return angle_utilities.position_angle(slon, slat, olon, olat)
def skyoffset_frame(self, rotation=None):
"""
Returns the sky offset frame with this SkyCoord at the origin.
Parameters
----------
rotation : angle-like
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Returns
-------
astrframe : `~astropy.coordinates.SkyOffsetFrame`
A sky offset frame of the same type as this |SkyCoord| (e.g., if
this object has an ICRS coordinate, the resulting frame is
SkyOffsetICRS, with the origin set to this object)
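        Examples
        --------
        A short, illustrative sketch (the coordinate values are arbitrary):
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> center = SkyCoord(10*u.deg, 45*u.deg)
        >>> target = SkyCoord(11*u.deg, 46*u.deg)
        >>> offsets = target.transform_to(center.skyoffset_frame())  # doctest: +SKIP
        >>> offsets.lon, offsets.lat  # doctest: +SKIP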
"""
from .builtin_frames.skyoffset import SkyOffsetFrame
return SkyOffsetFrame(origin=self, rotation=rotation)
def get_constellation(self, short_name=False, constellation_list="iau"):
"""
Determines the constellation(s) of the coordinates this SkyCoord contains.
Parameters
----------
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If this is a scalar coordinate, returns the name of the
constellation. If it is an array |SkyCoord|, it returns an array of
names.
Notes
-----
To determine which constellation a point on the sky is in, this first
precesses to B1875, and then uses the Delporte boundaries of the 88
modern constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
See Also
--------
astropy.coordinates.get_constellation
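        Examples
        --------
        An illustrative sketch using the approximate coordinates of Betelgeuse:
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> SkyCoord(88.8*u.deg, 7.4*u.deg).get_constellation()  # doctest: +SKIP
        'Orion'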
"""
from .funcs import get_constellation
# because of issue #7028, the conversion to a PrecessedGeocentric
# system fails in some cases. Work around is to drop the velocities.
# they are not needed here since only position information is used
extra_frameattrs = {nm: getattr(self, nm) for nm in self._extra_frameattr_names}
novel = SkyCoord(
self.realize_frame(self.data.without_differentials()), **extra_frameattrs
)
return get_constellation(novel, short_name, constellation_list)
# the simpler version below can be used when gh-issue #7028 is resolved
# return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode="all"):
"""
Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``) or
            including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
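        Examples
        --------
        A minimal sketch; the WCS below is a made-up tangent-plane projection:
        >>> from astropy.coordinates import SkyCoord
        >>> from astropy.wcs import WCS
        >>> import astropy.units as u
        >>> w = WCS(naxis=2)
        >>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
        >>> w.wcs.crval = [10.0, 41.0]
        >>> w.wcs.crpix = [50.0, 50.0]
        >>> w.wcs.cdelt = [-0.0005, 0.0005]
        >>> xp, yp = SkyCoord(10*u.deg, 41*u.deg).to_pixel(w)  # doctest: +SKIP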
"""
from astropy.wcs.utils import skycoord_to_pixel
return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode="all"):
"""
Create a new SkyCoord from pixel coordinates using a World Coordinate System.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``) or
            including only the core WCS transformation (``'wcs'``).
Returns
-------
coord : `~astropy.coordinates.SkyCoord`
A new object with sky coordinates corresponding to the input ``xp``
and ``yp``.
See Also
--------
to_pixel : to do the inverse operation
astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
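        Examples
        --------
        A minimal sketch, assuming ``w`` is an existing `~astropy.wcs.WCS`
        instance (e.g. read from a FITS header):
        >>> coord = SkyCoord.from_pixel(50.0, 50.0, w)  # doctest: +SKIP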
"""
from astropy.wcs.utils import pixel_to_skycoord
return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)
def contained_by(self, wcs, image=None, **kwargs):
"""
Determines if the SkyCoord is contained in the given wcs footprint.
Parameters
----------
wcs : `~astropy.wcs.WCS`
            The WCS to check whether the coordinate falls within its footprint.
image : array
Optional. The image associated with the wcs object that the coordinate
            is being checked against. If not given, the ``naxis`` keywords will be used
to determine if the coordinate falls within the wcs footprint.
**kwargs
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
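        Examples
        --------
        A minimal sketch, assuming ``w`` is an existing `~astropy.wcs.WCS`
        instance with valid ``NAXIS`` keywords:
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> SkyCoord(10*u.deg, 41*u.deg).contained_by(w)  # doctest: +SKIP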
"""
if image is not None:
ymax, xmax = image.shape
else:
xmax, ymax = wcs._naxis
import warnings
with warnings.catch_warnings():
# Suppress warnings since they just mean we didn't find the coordinate
warnings.simplefilter("ignore")
try:
x, y = self.to_pixel(wcs, **kwargs)
except Exception:
return False
return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)
def radial_velocity_correction(
self, kind="barycentric", obstime=None, location=None
):
"""
Compute the correction required to convert a radial velocity at a given
        time and place on the Earth's surface to a barycentric or heliocentric
velocity.
Parameters
----------
kind : str
The kind of velocity correction. Must be 'barycentric' or
'heliocentric'.
obstime : `~astropy.time.Time` or None, optional
The time at which to compute the correction. If `None`, the
``obstime`` frame attribute on the |SkyCoord| will be used.
location : `~astropy.coordinates.EarthLocation` or None, optional
The observer location at which to compute the correction. If
`None`, the ``location`` frame attribute on the passed-in
``obstime`` will be used, and if that is None, the ``location``
frame attribute on the |SkyCoord| will be used.
Raises
------
ValueError
If either ``obstime`` or ``location`` are passed in (not ``None``)
when the frame attribute is already set on this |SkyCoord|.
TypeError
If ``obstime`` or ``location`` aren't provided, either as arguments
or as frame attributes.
Returns
-------
vcorr : `~astropy.units.Quantity` ['speed']
The correction with a positive sign. I.e., *add* this
to an observed radial velocity to get the barycentric (or
heliocentric) velocity. If m/s precision or better is needed,
see the notes below.
Notes
-----
The barycentric correction is calculated to higher precision than the
        heliocentric correction and includes additional physics (e.g., time dilation).
Use barycentric corrections if m/s precision is required.
The algorithm here is sufficient to perform corrections at the mm/s level, but
care is needed in application. The barycentric correction returned uses the optical
approximation v = z * c. Strictly speaking, the barycentric correction is
multiplicative and should be applied as::
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord, EarthLocation
        >>> from astropy.constants import c
        >>> import astropy.units as u
>>> t = Time(56370.5, format='mjd', scale='utc')
>>> loc = EarthLocation('149d33m00.5s','-30d18m46.385s',236.87*u.m)
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc) # doctest: +REMOTE_DATA
>>> rv = rv + vcorr + rv * vcorr / c # doctest: +SKIP
Also note that this method returns the correction velocity in the so-called
*optical convention*::
>>> vcorr = zb * c # doctest: +SKIP
where ``zb`` is the barycentric correction redshift as defined in section 3
of Wright & Eastman (2014). The application formula given above follows from their
equation (11) under assumption that the radial velocity ``rv`` has also been defined
using the same optical convention. Note, this can be regarded as a matter of
velocity definition and does not by itself imply any loss of accuracy, provided
sufficient care has been taken during interpretation of the results. If you need
the barycentric correction expressed as the full relativistic velocity (e.g., to provide
it as the input to another software which performs the application), the
following recipe can be used::
>>> zb = vcorr / c # doctest: +REMOTE_DATA
>>> zb_plus_one_squared = (zb + 1) ** 2 # doctest: +REMOTE_DATA
>>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1) # doctest: +REMOTE_DATA
or alternatively using just equivalencies::
>>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz)) # doctest: +REMOTE_DATA
See also `~astropy.units.equivalencies.doppler_optical`,
`~astropy.units.equivalencies.doppler_radio`, and
`~astropy.units.equivalencies.doppler_relativistic` for more information on
the velocity conventions.
The default is for this method to use the builtin ephemeris for
computing the sun and earth location. Other ephemerides can be chosen
by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
either directly or via ``with`` statement. For example, to use the JPL
ephemeris, do::
>>> from astropy.coordinates import solar_system_ephemeris
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> with solar_system_ephemeris.set('jpl'): # doctest: +REMOTE_DATA
... rv += sc.radial_velocity_correction(obstime=t, location=loc) # doctest: +SKIP
"""
# has to be here to prevent circular imports
from .solar_system import get_body_barycentric_posvel
# location validation
timeloc = getattr(obstime, "location", None)
if location is None:
if self.location is not None:
location = self.location
if timeloc is not None:
raise ValueError(
"`location` cannot be in both the passed-in `obstime` and this"
" `SkyCoord` because it is ambiguous which is meant for the"
" radial_velocity_correction."
)
elif timeloc is not None:
location = timeloc
else:
raise TypeError(
"Must provide a `location` to radial_velocity_correction, either as"
" a SkyCoord frame attribute, as an attribute on the passed in"
" `obstime`, or in the method call."
)
elif self.location is not None or timeloc is not None:
raise ValueError(
"Cannot compute radial velocity correction if `location` argument is"
" passed in and there is also a `location` attribute on this SkyCoord"
" or the passed-in `obstime`."
)
# obstime validation
coo_at_rv_obstime = self # assume we need no space motion for now
if obstime is None:
obstime = self.obstime
if obstime is None:
raise TypeError(
"Must provide an `obstime` to radial_velocity_correction, either as"
" a SkyCoord frame attribute or in the method call."
)
elif self.obstime is not None and self.frame.data.differentials:
# we do need space motion after all
coo_at_rv_obstime = self.apply_space_motion(obstime)
elif self.obstime is None:
# warn the user if the object has differentials set
if "s" in self.data.differentials:
warnings.warn(
"SkyCoord has space motion, and therefore the specified "
"position of the SkyCoord may not be the same as "
"the `obstime` for the radial velocity measurement. "
"This may affect the rv correction at the order of km/s"
"for very high proper motions sources. If you wish to "
"apply space motion of the SkyCoord to correct for this"
"the `obstime` attribute of the SkyCoord must be set",
AstropyUserWarning,
)
pos_earth, v_earth = get_body_barycentric_posvel("earth", obstime)
if kind == "barycentric":
v_origin_to_earth = v_earth
elif kind == "heliocentric":
v_sun = get_body_barycentric_posvel("sun", obstime)[1]
v_origin_to_earth = v_earth - v_sun
else:
raise ValueError(
"`kind` argument to radial_velocity_correction must "
f"be 'barycentric' or 'heliocentric', but got '{kind}'"
)
gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
        # Transforming to GCRS is not the correct thing to do here, since we don't
        # want to include aberration (or light deflection); instead, only apply
        # parallax if necessary.
icrs_cart = coo_at_rv_obstime.icrs.cartesian
icrs_cart_novel = icrs_cart.without_differentials()
if self.data.__class__ is UnitSphericalRepresentation:
targcart = icrs_cart_novel
else:
# skycoord has distances so apply parallax
obs_icrs_cart = pos_earth + gcrs_p
targcart = icrs_cart_novel - obs_icrs_cart
targcart /= targcart.norm()
if kind == "barycentric":
beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
gamma_obs = 1 / np.sqrt(1 - beta_obs.norm() ** 2)
gr = location.gravitational_redshift(obstime)
            # barycentric redshift according to eq 28 in Wright & Eastman (2014),
# neglecting Shapiro delay and effects of the star's own motion
zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr / speed_of_light)
# try and get terms corresponding to stellar motion.
if icrs_cart.differentials:
try:
ro = self.icrs.cartesian
beta_star = ro.differentials["s"].to_cartesian() / speed_of_light
# ICRS unit vector at coordinate epoch
ro = ro.without_differentials()
ro /= ro.norm()
zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))
except u.UnitConversionError:
warnings.warn(
"SkyCoord contains some velocity information, but not enough to"
" calculate the full space motion of the source, and so this"
" has been ignored for the purposes of calculating the radial"
" velocity correction. This can lead to errors on the order of"
" metres/second.",
AstropyUserWarning,
)
zb = zb - 1
return zb * speed_of_light
else:
# do a simpler correction ignoring time dilation and gravitational redshift
# this is adequate since Heliocentric corrections shouldn't be used if
# cm/s precision is required.
return targcart.dot(v_origin_to_earth + gcrs_v)
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
r"""
A convenience method to create and return a new SkyCoord from the data
in an astropy Table.
This method matches table columns that start with the case-insensitive
        names of the components of the requested frames (including
differentials), if they are also followed by a non-alphanumeric
character. It will also match columns that *end* with the component name
if a non-alphanumeric character is *before* it.
For example, the first rule means columns with names like
``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
`~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
are *not*. Similarly, the second rule applied to the
`~astropy.coordinates.Galactic` frame means that a column named
        ``'gal_l'`` will be used as the ``l`` component, but ``gall`` or
``'fill'`` will not.
The definition of alphanumeric here is based on Unicode's definition
of alphanumeric, except without ``_`` (which is normally considered
alphanumeric). So for ASCII, this means the non-alphanumeric characters
        are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
Parameters
----------
table : `~astropy.table.Table` or subclass
The table to load data from.
**coord_kwargs
Any additional keyword arguments are passed directly to this class's
constructor.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord` or subclass
The new instance.
Raises
------
ValueError
If more than one match is found in the table for a component,
unless the additional matches are also valid frame component names.
If a "coord_kwargs" is provided for a value also found in the table.
"""
_frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
frame = _frame_cls(**_frame_kwargs)
coord_kwargs["frame"] = coord_kwargs.get("frame", frame)
representation_component_names = set(
frame.get_representation_component_names()
).union(set(frame.get_representation_component_names("s")))
comp_kwargs = {}
for comp_name in representation_component_names:
# this matches things like 'ra[...]'' but *not* 'rad'.
# note that the "_" must be in there explicitly, because
# "alphanumeric" usually includes underscores.
starts_with_comp = comp_name + r"(\W|\b|_)"
# this part matches stuff like 'center_ra', but *not*
# 'aura'
ends_with_comp = r".*(\W|\b|_)" + comp_name + r"\b"
# the final regex ORs together the two patterns
rex = re.compile(
rf"({starts_with_comp})|({ends_with_comp})", re.IGNORECASE | re.UNICODE
)
# find all matches
matches = {col_name for col_name in table.colnames if rex.match(col_name)}
# now need to select among matches, also making sure we don't have
# an exact match with another component
if len(matches) == 0: # no matches
continue
elif len(matches) == 1: # only one match
col_name = matches.pop()
else: # more than 1 match
# try to sieve out other components
matches -= representation_component_names - {comp_name}
# if there's only one remaining match, it worked.
if len(matches) == 1:
col_name = matches.pop()
else:
raise ValueError(
f'Found at least two matches for component "{comp_name}":'
f' "{matches}". Cannot guess coordinates from a table with this'
" ambiguity."
)
comp_kwargs[comp_name] = table[col_name]
for k, v in comp_kwargs.items():
if k in coord_kwargs:
raise ValueError(
f'Found column "{v.name}" in table, but it was already provided as'
' "{k}" keyword to guess_from_table function.'
)
else:
coord_kwargs[k] = v
return cls(**coord_kwargs)
# Name resolve
@classmethod
def from_name(cls, name, frame="icrs", parse=False, cache=True):
"""
Given a name, query the CDS name resolver to attempt to retrieve
coordinate information for that object. The search database, sesame
url, and query timeout can be set through configuration items in
``astropy.coordinates.name_resolve`` -- see docstring for
`~astropy.coordinates.get_icrs_coordinates` for more
information.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
frame : str or `BaseCoordinateFrame` class or instance
The frame to transform the object to.
parse : bool
Whether to attempt extracting the coordinates from the name by
            parsing with a regex. For object catalog names that have
J-coordinates embedded in their names, e.g.,
'CRTS SSS100805 J194428-420209', this may be much faster than a
Sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, optional
Determines whether to cache the results or not. To update or
overwrite an existing value, pass ``cache='update'``.
Returns
-------
coord : SkyCoord
Instance of the SkyCoord class.
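        Examples
        --------
        A short sketch; resolving a name requires access to the Sesame service
        unless the result is already cached:
        >>> m42 = SkyCoord.from_name('M42')  # doctest: +REMOTE_DATA
        >>> psr = SkyCoord.from_name('PSR J1012+5307', parse=True)  # doctest: +SKIP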
"""
from .name_resolve import get_icrs_coordinates
icrs_coord = get_icrs_coordinates(name, parse, cache=cache)
icrs_sky_coord = cls(icrs_coord)
if frame in ("icrs", icrs_coord.__class__):
return icrs_sky_coord
else:
return icrs_sky_coord.transform_to(frame)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
import heapq
import inspect
import subprocess
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import contextmanager, suppress
from inspect import signature
from warnings import warn
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"TransformGraph",
"CoordinateTransform",
"FunctionTransform",
"BaseAffineTransform",
"AffineTransform",
"StaticMatrixTransform",
"DynamicMatrixTransform",
"FunctionTransformWithFiniteDifference",
"CompositeTransform",
]
def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in this
`~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result
def frame_comps_from_set(frame_set):
"""
    A `set` of all component names ever defined within any frame class in
this `~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result
class TransformGraph:
"""
A graph representing the paths between coordinate frames.
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache() # generates cache entries
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, "name", None)
if nm is not None:
if not isinstance(nm, list):
nm = [nm]
for name in nm:
dct[name] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this TransformGraph.
"""
if self._cached_frame_set is None:
self._cached_frame_set = set()
for a in self._graph:
self._cached_frame_set.add(a)
for b in self._graph[a]:
self._cached_frame_set.add(b)
return self._cached_frame_set.copy()
@property
def frame_attributes(self):
"""
A `dict` of all the attributes of all frame classes in this TransformGraph.
"""
if self._cached_frame_attributes is None:
self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)
return self._cached_frame_attributes
@property
def frame_component_names(self):
"""
        A `set` of all component names ever defined within any frame class in
this TransformGraph.
"""
if self._cached_component_names is None:
self._cached_component_names = frame_comps_from_set(self.frame_set)
return self._cached_component_names
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
weights on transforms are modified inplace.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._cached_frame_attributes = None
self._cached_component_names = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : `~astropy.coordinates.CoordinateTransform`
The transformation object. Typically a
`~astropy.coordinates.CoordinateTransform` object, although it may
be some other callable that is called with the same signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
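        Examples
        --------
        A minimal sketch with two hypothetical frame classes ``Frame1`` and
        ``Frame2`` (any callable with the transform call signature would do)::
            graph = TransformGraph()
            trans = FunctionTransform(frame1_to_frame2_func, Frame1, Frame2)
            graph.add_transform(Frame1, Frame2, trans)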
"""
if not inspect.isclass(fromsys):
raise TypeError("fromsys must be a class")
if not inspect.isclass(tosys):
raise TypeError("tosys must be a class")
if not callable(transform):
raise TypeError("transform must be callable")
frame_set = self.frame_set.copy()
frame_set.add(fromsys)
frame_set.add(tosys)
# Now we check to see if any attributes on the proposed frames override
# *any* component names, which we can't allow for some of the logic in
# the SkyCoord initializer to work
attrs = set(frame_attrs_from_set(frame_set).keys())
comps = frame_comps_from_set(frame_set)
invalid_attrs = attrs.intersection(comps)
if invalid_attrs:
invalid_frames = set()
for attr in invalid_attrs:
if attr in fromsys.frame_attributes:
invalid_frames.update([fromsys])
if attr in tosys.frame_attributes:
invalid_frames.update([tosys])
raise ValueError(
f"Frame(s) {list(invalid_frames)} contain invalid attribute names:"
f" {invalid_attrs}\nFrame attributes can not conflict with *any* of"
" the frame data component names (see"
" `frame_transform_graph.frame_component_names`)."
)
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or None
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or None
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or None
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
raise ValueError("fromsys and tosys must both be None if either are")
if transform is None:
raise ValueError("cannot give all Nones to remove_transform")
# search for the requested transform by brute force and remove it
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
if agraph[b] is transform:
del agraph[b]
fromsys = a
break
# If the transform was found, need to break out of the outer for loop too
if fromsys:
break
else:
raise ValueError(f"Could not find transform {transform} in the graph")
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError(
f"Current transform from {fromsys} to {tosys} is not"
f" {transform}"
)
# Remove the subgraph if it is now empty
if self._graph[fromsys] == {}:
self._graph.pop(fromsys)
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of class or None
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : float or int
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
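        Examples
        --------
        A short sketch using the shared graph of built-in frames (the exact
        distance depends on the registered transform priorities)::
            from astropy.coordinates import ICRS, FK5, frame_transform_graph
            path, distance = frame_transform_graph.find_shortest_path(ICRS, FK5)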
"""
inf = float("inf")
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, "priority") else 1)
# otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(getattr(agraph[b], "priority", 1))
# entries in q are [distance, count, nodeobj, pathlist]
# count is needed because in py 3.x, tie-breaking fails on the nodes.
# this way, insertion order is preserved if the weights are the same
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
                if n2 not in result:  # not yet visited
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError(
"n2 not in heap - this should be impossible!"
)
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
def get_transform(self, fromsys, tosys):
"""Generates and returns the CompositeTransform for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `~astropy.coordinates.CompositeTransform` or None
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
A `~astropy.coordinates.CompositeTransform` is always returned, because
`~astropy.coordinates.CompositeTransform` is slightly more adaptable in
the way it can be called than other transform classes. Specifically, it
takes care of intermediate steps of transformations in a way that is
consistent with 1-hop transformations.
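        Examples
        --------
        A minimal sketch using the shared graph of built-in frames::
            import astropy.units as u
            from astropy.coordinates import ICRS, Galactic, frame_transform_graph
            trans = frame_transform_graph.get_transform(ICRS, Galactic)
            gal = trans(ICRS(ra=10*u.deg, dec=20*u.deg), Galactic())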
"""
if not inspect.isclass(fromsys):
raise TypeError("fromsys is not a class")
if not inspect.isclass(tosys):
raise TypeError("tosys is not a class")
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(
transforms, fromsys, tosys, register_graph=False
)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
`BaseCoordinateFrame` subclass
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
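        Examples
        --------
        A short sketch; ``'icrs'`` is one of the built-in frame aliases::
            from astropy.coordinates import frame_transform_graph
            frame_cls = frame_transform_graph.lookup_name('icrs')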
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available transform names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(self._cached_names.keys())
def to_dot_graph(
self,
priorities=True,
addnodes=[],
savefn=None,
savelayout="plain",
saveformat=None,
color_edges=True,
):
"""
Converts this transform graph to the graphviz_ DOT format.
        Optionally saves it (requires `graphviz`_ to be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
            they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : None or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
The graphviz output format. (e.g. the ``-Txxx`` option for
the command line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
color_edges : bool
Color the edges between two nodes (frames) based on the type of
transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
blue, ``DynamicMatrixTransform``: green.
Returns
-------
dotgraph : str
A string with the DOT format graph.
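        Examples
        --------
        A minimal sketch that writes the DOT description of the shared graph of
        built-in frames to a file (layout and format options left at their
        defaults)::
            from astropy.coordinates import frame_transform_graph
            dot = frame_transform_graph.to_dot_graph(savefn='transforms.dot')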
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = {
f: [k for k, v in self._cached_names.items() if v == f]
for f in self.frame_set
}
for n in nodes:
if n in invclsaliases:
aliases = "`\\n`".join(invclsaliases[n])
nodenames.append(
'{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, aliases)
)
else:
nodenames.append(n.__name__ + "[ shape=oval ]")
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__] if color_edges else "black"
edgenames.append((a.__name__, b.__name__, pri, color))
# generate simple dot format graph
lines = ["digraph AstropyCoordinateTransformGraph {"]
lines.append("graph [rankdir=LR]")
lines.append("; ".join(nodenames) + ";")
for enm1, enm2, weights, color in edgenames:
labelstr_fmt = "[ {0} {1} ]"
if priorities:
priority_part = f'label = "{weights}"'
else:
priority_part = ""
color_part = f'color = "{color}"'
labelstr = labelstr_fmt.format(priority_part, color_part)
lines.append(f"{enm1} -> {enm2}{labelstr};")
lines.append("")
lines.append("overlap=false")
lines.append("}")
dotgraph = "\n".join(lines)
if savefn is not None:
if savelayout == "plain":
with open(savefn, "w") as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append("-T" + saveformat)
                # use text-mode pipes so the DOT source (a str) can be piped in
                # and the program output written back out as a str
                proc = subprocess.Popen(
                    args,
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    encoding="utf-8",
                )
stdout, stderr = proc.communicate(dotgraph)
if proc.returncode != 0:
raise OSError("problem running graphviz: \n" + stderr)
with open(savefn, "w") as f:
f.write(stdout)
return dotgraph
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <https://networkx.github.io/>`_
package installed for this to work.
Returns
-------
nxgraph : ``networkx.Graph``
This `~astropy.coordinates.TransformGraph` as a
`networkx.Graph <https://networkx.github.io/documentation/stable/reference/classes/graph.html>`_.
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__]
nxgraph.add_edge(a, b, weight=pri, color=color)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):
"""A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
            The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Additional keyword arguments are passed into the ``transcls``
constructor.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third are
``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use
`~astropy.coordinates.TransformGraph.add_transform` instead of this
decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(
func, fromsys, tosys, priority=priority, register_graph=self, **kwargs
)
return func
return deco
def _add_merged_transform(self, fromsys, tosys, *furthersys, priority=1):
"""
Add a single-step transform that encapsulates a multi-step transformation path,
using the transforms that already exist in the graph.
The created transform internally calls the existing transforms. If all of the
transforms are affine, the merged transform is
`~astropy.coordinates.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
This method is primarily useful for defining loopback transformations
(i.e., where ``fromsys`` and the final ``tosys`` are the same).
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform to.
*furthersys : class
Additional coordinate frame classes to transform to in order.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Notes
-----
Even though the created transform is a single step in the graph, it
will still internally call the constituent transforms. Thus, there is
no performance benefit for using this created transform.
For Astropy's built-in frames, loopback transformations typically use
`~astropy.coordinates.ICRS` to be safe. Transforming through an inertial
frame ensures that changes in observation time and observer
location/velocity are properly accounted for.
An error will be raised if a direct transform between ``fromsys`` and
        ``tosys`` already exists.
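        Examples
        --------
        A minimal sketch defining a loopback transformation for a hypothetical
        frame class ``MyFrame`` through `~astropy.coordinates.ICRS`::
            from astropy.coordinates import ICRS, frame_transform_graph
            frame_transform_graph._add_merged_transform(MyFrame, ICRS, MyFrame)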
"""
frames = [fromsys, tosys, *furthersys]
lastsys = frames[-1]
full_path = self.get_transform(fromsys, lastsys)
transforms = [
self.get_transform(frame_a, frame_b)
for frame_a, frame_b in zip(frames[:-1], frames[1:])
]
if None in transforms:
raise ValueError("This transformation path is not possible")
if len(full_path.transforms) == 1:
raise ValueError(
f"A direct transform for {fromsys.__name__}->{lastsys.__name__} already"
" exists"
)
self.add_transform(
fromsys,
lastsys,
CompositeTransform(
transforms, fromsys, lastsys, priority=priority
)._as_single_transform(),
)
@contextmanager
def impose_finite_difference_dt(self, dt):
"""
        Context manager to impose a finite-difference time step on all applicable transformations.
For each transformation in this transformation graph that has the attribute
``finite_difference_dt``, that attribute is set to the provided value. The only standard
transformation with this attribute is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
Parameters
----------
dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the finite difference.
If a callable, should accept ``(fromcoord, toframe)`` and return the ``dt`` value.
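        Examples
        --------
        A minimal sketch; any transformation with a ``finite_difference_dt``
        attribute that runs inside the ``with`` block uses the imposed step::
            import astropy.units as u
            from astropy.coordinates import frame_transform_graph
            with frame_transform_graph.impose_finite_difference_dt(1 * u.year):
                ...  # perform transformations here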
"""
key = "finite_difference_dt"
saved_settings = []
try:
for to_frames in self._graph.values():
for transform in to_frames.values():
if hasattr(transform, key):
old_setting = (transform, key, getattr(transform, key))
saved_settings.append(old_setting)
setattr(transform, key, dt)
yield
finally:
for setting in saved_settings:
setattr(*setting)
# <-------------------Define the builtin transform classes-------------------->
class CoordinateTransform(metaclass=ABCMeta):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to start from.
tosys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not inspect.isclass(fromsys):
raise TypeError("fromsys must be a class")
if not inspect.isclass(tosys):
raise TypeError("tosys must be a class")
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not inspect.isclass(fromsys) or not inspect.isclass(tosys):
raise TypeError("fromsys and tosys must be classes")
self.overlapping_frame_attr_names = overlap = []
if hasattr(fromsys, "frame_attributes") and hasattr(tosys, "frame_attributes"):
# the if statement is there so that non-frame things might be usable
# if it makes sense
for from_nm in fromsys.frame_attributes:
if from_nm in tosys.frame_attributes:
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : `~astropy.coordinates.TransformGraph` object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary ``tosys.frame_attributes``.
Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
        ``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not callable(func):
raise TypeError("func must be callable")
        # ``signature`` raises TypeError or ValueError for callables that cannot
        # be introspected (e.g. some builtins); those skip the check below.
        try:
            sig = signature(func)
        except (TypeError, ValueError):
            pass
        else:
            try:
                # the transform machinery always calls ``func(fromcoord, toframe)``
                sig.bind(None, None)
            except TypeError:
                raise ValueError(
                    "provided function does not accept two arguments"
                ) from None
self.func = func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError(
f"the transformation function yielded {res} but "
f"should have been of type {self.tosys}"
)
if fromcoord.data.differentials and not res.data.differentials:
warn(
"Applied a FunctionTransform to a coordinate frame with "
"differentials, but the FunctionTransform does not handle "
"differentials, so they have been dropped.",
AstropyWarning,
)
return res
class FunctionTransformWithFiniteDifference(FunctionTransform):
r"""Transormation based on functions using finite difference for velocities.
A coordinate transformation that works like a
`~astropy.coordinates.FunctionTransform`, but computes velocity shifts
based on the finite-difference relative to one of the frame attributes.
Note that the transform function should *not* change the differential at
all in this case, as any differentials will be overridden.
When a differential is in the from coordinate, the finite difference
    calculation has two components. The first part is simply the existing
    differential, but re-oriented (using finite-difference techniques) to
point in the direction the velocity vector has in the *new* frame. The
second component is the "induced" velocity. That is, the velocity
intrinsic to the frame itself, estimated by shifting the frame using the
``finite_difference_frameattr_name`` frame attribute a small amount
(``finite_difference_dt``) in time and re-calculating the position.
Parameters
----------
finite_difference_frameattr_name : str or None
The name of the frame attribute on the frames to use for the finite
difference. Both the to and the from frame will be checked for this
attribute, but only one needs to have it. If None, no velocity
component induced from the frame itself will be included - only the
re-orientation of any existing differential.
finite_difference_dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the
finite difference. If a callable, should accept
``(fromcoord, toframe)`` and return the ``dt`` value.
symmetric_finite_difference : bool
If True, the finite difference is computed as
        :math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The latter
case has slightly better performance (and more stable finite difference
behavior).
All other parameters are identical to the initializer for
`~astropy.coordinates.FunctionTransform`.
"""
def __init__(
self,
func,
fromsys,
tosys,
priority=1,
register_graph=None,
finite_difference_frameattr_name="obstime",
finite_difference_dt=1 * u.second,
symmetric_finite_difference=True,
):
super().__init__(func, fromsys, tosys, priority, register_graph)
self.finite_difference_frameattr_name = finite_difference_frameattr_name
self.finite_difference_dt = finite_difference_dt
self.symmetric_finite_difference = symmetric_finite_difference
@property
def finite_difference_frameattr_name(self):
return self._finite_difference_frameattr_name
@finite_difference_frameattr_name.setter
def finite_difference_frameattr_name(self, value):
if value is None:
self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
else:
diff_attr_in_fromsys = value in self.fromsys.frame_attributes
diff_attr_in_tosys = value in self.tosys.frame_attributes
if diff_attr_in_fromsys or diff_attr_in_tosys:
self._diff_attr_in_fromsys = diff_attr_in_fromsys
self._diff_attr_in_tosys = diff_attr_in_tosys
else:
raise ValueError(
f"Frame attribute name {value} is not a frame attribute of"
f" {self.fromsys} or {self.tosys}"
)
self._finite_difference_frameattr_name = value
def __call__(self, fromcoord, toframe):
from .representation import CartesianDifferential, CartesianRepresentation
supcall = self.func
if fromcoord.data.differentials:
# this is the finite difference case
if callable(self.finite_difference_dt):
dt = self.finite_difference_dt(fromcoord, toframe)
else:
dt = self.finite_difference_dt
halfdt = dt / 2
from_diffless = fromcoord.realize_frame(
fromcoord.data.without_differentials()
)
reprwithoutdiff = supcall(from_diffless, toframe)
# first we use the existing differential to compute an offset due to
# the already-existing velocity, but in the new frame
fromcoord_cart = fromcoord.cartesian
if self.symmetric_finite_difference:
fwdxyz = (
fromcoord_cart.xyz
+ fromcoord_cart.differentials["s"].d_xyz * halfdt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
backxyz = (
fromcoord_cart.xyz
- fromcoord_cart.differentials["s"].d_xyz * halfdt
)
back = supcall(
fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe
)
else:
fwdxyz = (
fromcoord_cart.xyz + fromcoord_cart.differentials["s"].d_xyz * dt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
back = reprwithoutdiff
diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
# now we compute the "induced" velocities due to any movement in
# the frame itself over time
attrname = self.finite_difference_frameattr_name
if attrname is not None:
if self.symmetric_finite_difference:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + halfdt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + halfdt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) - halfdt}
from_diffless_back = from_diffless.replicate(**kws)
else:
from_diffless_back = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) - halfdt}
back_frame = toframe.replicate_without_data(**kws)
else:
back_frame = toframe
back = supcall(from_diffless_back, back_frame)
else:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + dt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + dt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
back = reprwithoutdiff
diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
newdiff = CartesianDifferential(diffxyz)
reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(
newdiff
)
return reprwithoutdiff.realize_frame(reprwithdiff)
else:
return supcall(fromcoord, toframe)
class BaseAffineTransform(CoordinateTransform):
"""Base class for common functionality between the ``AffineTransform``-type
subclasses.
This base class is needed because `~astropy.coordinates.AffineTransform`
and the matrix transform classes share the ``__call__()`` method, but
differ in how they generate the affine parameters.
`~astropy.coordinates.StaticMatrixTransform` passes in a matrix stored as a
class attribute, and both of the matrix transforms pass in ``None`` for the
offset. Hence, user subclasses would likely want to subclass this (rather
than `~astropy.coordinates.AffineTransform`) if they want to provide
alternative transformations using this machinery.
"""
def _apply_transform(self, fromcoord, matrix, offset):
from .representation import (
CartesianDifferential,
RadialDifferential,
SphericalCosLatDifferential,
SphericalDifferential,
UnitSphericalRepresentation,
)
data = fromcoord.data
has_velocity = "s" in data.differentials
# Bail out if no transform is actually requested
if matrix is None and offset is None:
return data
# list of unit differentials
_unit_diffs = (
SphericalDifferential._unit_differential,
SphericalCosLatDifferential._unit_differential,
)
unit_vel_diff = has_velocity and isinstance(
data.differentials["s"], _unit_diffs
)
rad_vel_diff = has_velocity and isinstance(
data.differentials["s"], RadialDifferential
)
# Some initial checking to short-circuit doing any re-representation if
# we're going to fail anyways:
if isinstance(data, UnitSphericalRepresentation) and offset is not None:
raise TypeError(
"Position information stored on coordinate frame "
"is insufficient to do a full-space position "
"transformation (representation class: {data.__class__})"
)
elif (
has_velocity
and (unit_vel_diff or rad_vel_diff)
and offset is not None
and "s" in offset.differentials
):
# Coordinate has a velocity, but it is not a full-space velocity
# that we need to do a velocity offset
raise TypeError(
"Velocity information stored on coordinate frame is insufficient to do"
" a full-space velocity transformation (differential class:"
f" {data.differentials['s'].__class__})"
)
elif len(data.differentials) > 1:
# We should never get here because the frame initializer shouldn't
# allow more differentials, but this just adds protection for
# subclasses that somehow skip the checks
raise ValueError(
"Representation passed to AffineTransform contains multiple associated"
" differentials. Only a single differential with velocity units is"
f" presently supported (differentials: {data.differentials})."
)
# If the representation is a UnitSphericalRepresentation, and this is
# just a MatrixTransform, we have to try to turn the differential into a
# Unit version of the differential (if no radial velocity) or a
# sphericaldifferential with zero proper motion (if only a radial
# velocity) so that the matrix operation works
if (
has_velocity
and isinstance(data, UnitSphericalRepresentation)
and not unit_vel_diff
and not rad_vel_diff
):
# retrieve just velocity differential
unit_diff = data.differentials["s"].represent_as(
data.differentials["s"]._unit_differential, data
)
data = data.with_differentials({"s": unit_diff}) # updates key
# If it's a RadialDifferential, we flat-out ignore the differentials
# This is because, by this point (past the validation above), we can
# only possibly be doing a rotation-only transformation, and that
# won't change the radial differential. We later add it back in
elif rad_vel_diff:
data = data.without_differentials()
# Convert the representation and differentials to cartesian without
# having them attached to a frame
rep = data.to_cartesian()
diffs = {
k: diff.represent_as(CartesianDifferential, data)
for k, diff in data.differentials.items()
}
rep = rep.with_differentials(diffs)
# Only do transform if matrix is specified. This is for speed in
# transformations that only specify an offset (e.g., LSR)
if matrix is not None:
# Note: this applies to both representation and differentials
rep = rep.transform(matrix)
# TODO: if we decide to allow arithmetic between representations that
# contain differentials, this can be tidied up
if offset is not None:
newrep = rep.without_differentials() + offset.without_differentials()
else:
newrep = rep.without_differentials()
# We need a velocity (time derivative) and, for now, are strict: the
# representation can only contain a velocity differential and no others.
if has_velocity and not rad_vel_diff:
veldiff = rep.differentials["s"] # already in Cartesian form
if offset is not None and "s" in offset.differentials:
veldiff = veldiff + offset.differentials["s"]
newrep = newrep.with_differentials({"s": veldiff})
if isinstance(fromcoord.data, UnitSphericalRepresentation):
# Special-case this because otherwise the return object will think
# it has a valid distance with the default return (a
# CartesianRepresentation instance)
if has_velocity and not unit_vel_diff and not rad_vel_diff:
# We have to first represent as the Unit types we converted to,
# then put the d_distance information back in to the
# differentials and re-represent as their original forms
newdiff = newrep.differentials["s"]
_unit_cls = fromcoord.data.differentials["s"]._unit_differential
newdiff = newdiff.represent_as(_unit_cls, newrep)
kwargs = {comp: getattr(newdiff, comp) for comp in newdiff.components}
kwargs["d_distance"] = fromcoord.data.differentials["s"].d_distance
diffs = {
"s": fromcoord.data.differentials["s"].__class__(
copy=False, **kwargs
)
}
elif has_velocity and unit_vel_diff:
newdiff = newrep.differentials["s"].represent_as(
fromcoord.data.differentials["s"].__class__, newrep
)
diffs = {"s": newdiff}
else:
diffs = newrep.differentials
newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs
newrep = newrep.with_differentials(diffs)
elif has_velocity and unit_vel_diff:
# Here, we're in the case where the representation is not
# UnitSpherical, but the differential *is* one of the UnitSpherical
# types. We have to convert back to that differential class or the
# resulting frame will think it has a valid radial_velocity. This
# can probably be cleaned up: we currently have to go through the
# dimensional version of the differential before representing as the
# unit differential so that the units work out (the distance length
# unit shouldn't appear in the resulting proper motions)
diff_cls = fromcoord.data.differentials["s"].__class__
newrep = newrep.represent_as(
fromcoord.data.__class__, diff_cls._dimensional_differential
)
newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)
# We pulled the radial differential off of the representation
# earlier, so now we need to put it back. But, in order to do that, we
# have to turn the representation into a repr that is compatible with
# having a RadialDifferential
if has_velocity and rad_vel_diff:
newrep = newrep.represent_as(fromcoord.data.__class__)
newrep = newrep.with_differentials({"s": fromcoord.data.differentials["s"]})
return newrep
def __call__(self, fromcoord, toframe):
params = self._affine_params(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, *params)
return toframe.realize_frame(newrep)
@abstractmethod
def _affine_params(self, fromcoord, toframe):
pass
class AffineTransform(BaseAffineTransform):
"""
    A coordinate transformation specified as a function that yields a 3 x 3
    cartesian transformation matrix and a translation vector that can
    optionally carry a velocity offset.
See `~astropy.coordinates.Galactocentric` for
an example.
Parameters
----------
transform_func : callable
A callable that has the signature ``transform_func(fromcoord, toframe)``
and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
Cartesian representation, and a ``CartesianRepresentation`` with
(optionally) an attached velocity ``CartesianDifferential`` to represent
a translation and offset in velocity to apply after the matrix
operation.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``transform_func`` is not callable
"""
def __init__(self, transform_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(transform_func):
raise TypeError("transform_func is not callable")
self.transform_func = transform_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.transform_func(fromcoord, toframe)
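# A minimal sketch of a ``transform_func`` suitable for ``AffineTransform``.
# ``FromFrame``/``ToFrame`` are hypothetical frame classes; ``rotation_matrix``
# and ``CartesianRepresentation`` are real astropy helpers. This illustrates
# the expected return value only, not a transform shipped by this module:
#
#     from astropy import units as u
#     from astropy.coordinates import CartesianRepresentation
#     from astropy.coordinates.matrix_utilities import rotation_matrix
#
#     def fromframe_to_toframe(fromcoord, toframe):
#         matrix = rotation_matrix(30 * u.deg, axis="z")
#         offset = CartesianRepresentation([1.0, 0.0, 0.0] * u.kpc)
#         return matrix, offset
#
#     AffineTransform(fromframe_to_toframe, FromFrame, ToFrame)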
class StaticMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
        A 3 x 3 matrix for transforming 3-vectors. In most cases it will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError("Provided matrix is not 3 x 3")
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix, None
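# A minimal usage sketch for ``StaticMatrixTransform`` (``FrameA``/``FrameB``
# and ``graph`` are hypothetical; ``rotation_matrix`` is a real astropy helper):
#
#     from astropy import units as u
#     from astropy.coordinates.matrix_utilities import rotation_matrix
#
#     StaticMatrixTransform(rotation_matrix(23.5 * u.deg, axis="x"),
#                           FrameA, FrameB, register_graph=graph)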
class DynamicMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(matrix_func):
raise TypeError("matrix_func is not callable")
self.matrix_func = matrix_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix_func(fromcoord, toframe), None
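# A minimal sketch of a ``matrix_func`` suitable for ``DynamicMatrixTransform``
# (hypothetical frames; ``rotation_angle`` stands in for whatever frame
# attribute the matrix actually depends on):
#
#     def framea_to_frameb(fromcoord, toframe):
#         # the returned matrix may vary with attributes of either frame
#         return rotation_matrix(toframe.rotation_angle, axis="z")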
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
    transforms : sequence of `~astropy.coordinates.CoordinateTransform` objects
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
If `True`, consecutive `~astropy.coordinates.StaticMatrixTransform`
will be collapsed into a single transformation to speed up the
calculation.
"""
def __init__(
self,
transforms,
fromsys,
tosys,
priority=1,
register_graph=None,
collapse_static_mats=True,
):
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
        Combine consecutive StaticMatrixTransform objects into single
        transforms and return the resulting list of transforms.
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if isinstance(lasttrans, StaticMatrixTransform) and isinstance(
currtrans, StaticMatrixTransform
):
newtrans[-1] = StaticMatrixTransform(
currtrans.matrix @ lasttrans.matrix,
lasttrans.fromsys,
currtrans.tosys,
)
else:
newtrans.append(currtrans)
return newtrans
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
# build an intermediate frame with attributes taken from either
# `toframe`, or if not there, `fromcoord`, or if not there, use
# the defaults
# TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.frame_attributes:
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
# this is safe even in the case where self.transforms is empty, because
# coordinate objects are immutable, so copying is not needed
return curr_coord
def _as_single_transform(self):
"""
Return an encapsulated version of the composite transform so that it appears to
be a single transform.
The returned transform internally calls the constituent transforms. If all of
the transforms are affine, the merged transform is
`~astropy.coordinates.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
"""
# Create a list of the transforms including flattening any constituent CompositeTransform
transforms = [
t if not isinstance(t, CompositeTransform) else t._as_single_transform()
for t in self.transforms
]
if all([isinstance(t, BaseAffineTransform) for t in transforms]):
# Check if there may be an origin shift
fixed_origin = all(
isinstance(t, (StaticMatrixTransform, DynamicMatrixTransform))
for t in transforms
)
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return None if fixed_origin else (None, None)
# Create a merged attribute dictionary for any intermediate frames
# For any attributes shared by the "from"/"to" frames, the "to" frame takes
# precedence because this is the same choice implemented in __call__()
merged_attr = {
name: getattr(from_coo, name) for name in from_coo.frame_attributes
}
merged_attr.update(
{
name: getattr(to_frame, name)
for name in to_frame.frame_attributes
}
)
affine_params = (None, None)
# Step through each transform step (frame A -> frame B)
for i, t in enumerate(transforms):
# Extract the relevant attributes for frame A
if i == 0:
# If frame A is actually the initial frame, preserve its attributes
a_attr = {
name: getattr(from_coo, name)
for name in from_coo.frame_attributes
}
else:
a_attr = {
k: v
for k, v in merged_attr.items()
if k in t.fromsys.frame_attributes
}
# Extract the relevant attributes for frame B
b_attr = {
k: v
for k, v in merged_attr.items()
if k in t.tosys.frame_attributes
}
# Obtain the affine parameters for the transform
# Note that we insert some dummy data into frame A because the transformation
# machinery requires there to be data present. Removing that limitation
# is a possible TODO, but some care would need to be taken because some affine
# transforms have branching code depending on the presence of differentials.
next_affine_params = t._affine_params(
t.fromsys(from_coo.data, **a_attr), t.tosys(**b_attr)
)
# Combine the affine parameters with the running set
affine_params = _combine_affine_params(
affine_params, next_affine_params
)
# If there is no origin shift, return only the matrix
return affine_params[0] if fixed_origin else affine_params
# The return type depends on whether there is any origin shift
transform_type = DynamicMatrixTransform if fixed_origin else AffineTransform
else:
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return to_frame.realize_frame(from_coo.data)
return self(from_coo, to_frame)
transform_type = FunctionTransformWithFiniteDifference
return transform_type(
single_transform, self.fromsys, self.tosys, priority=self.priority
)
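# A minimal composition sketch (``FrameA``/``FrameB``/``FrameC`` and the
# matrices ``m_ab``/``m_bc`` are hypothetical). With the default
# ``collapse_static_mats=True``, consecutive static matrices are merged, so the
# resulting CompositeTransform holds a single combined step:
#
#     step_ab = StaticMatrixTransform(m_ab, FrameA, FrameB)
#     step_bc = StaticMatrixTransform(m_bc, FrameB, FrameC)
#     combined = CompositeTransform([step_ab, step_bc], FrameA, FrameC)
#     assert len(combined.transforms) == 1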
def _combine_affine_params(params, next_params):
"""
Combine two sets of affine parameters.
The parameters for an affine transformation are a 3 x 3 Cartesian
transformation matrix and a displacement vector, which can include an
attached velocity. Either type of parameter can be ``None``.
"""
M, vec = params
next_M, next_vec = next_params
# Multiply the transformation matrices if they both exist
if M is not None and next_M is not None:
new_M = next_M @ M
else:
new_M = M if M is not None else next_M
if vec is not None:
# Transform the first displacement vector by the second transformation matrix
if next_M is not None:
vec = vec.transform(next_M)
# Calculate the new displacement vector
if next_vec is not None:
if "s" in vec.differentials and "s" in next_vec.differentials:
# Adding vectors with velocities takes more steps
# TODO: Add support in representation.py
new_vec_velocity = vec.differentials["s"] + next_vec.differentials["s"]
new_vec = vec.without_differentials() + next_vec.without_differentials()
new_vec = new_vec.with_differentials({"s": new_vec_velocity})
else:
new_vec = vec + next_vec
else:
new_vec = vec
else:
new_vec = next_vec
return new_M, new_vec
# map class names to colorblind-safe colors
trans_to_color = {}
trans_to_color[AffineTransform] = "#555555" # gray
trans_to_color[FunctionTransform] = "#783001" # dark red-ish/brown
trans_to_color[FunctionTransformWithFiniteDifference] = "#d95f02" # red-ish
trans_to_color[StaticMatrixTransform] = "#7570b3" # blue-ish
trans_to_color[DynamicMatrixTransform] = "#1b9e77" # green-ish
|
a8661e5426a044b980d15e068a349591ee84ca9a59424a14ded6f3fd07d733bb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import coordinates, time
from astropy import units as u
from astropy.table import Column, NdarrayMixin, QTable, Table, table_helpers, unique
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyUserWarning
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def test_column_group_by(T1):
for masked in (False, True):
t1 = QTable(T1, masked=masked)
t1a = t1["a"].copy()
# Group by a Column (i.e. numpy array)
t1ag = t1a.group_by(t1["a"])
assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8]))
# Group by a Table
t1ag = t1a.group_by(t1["a", "b"])
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Group by a numpy structured array
t1ag = t1a.group_by(t1["a", "b"].as_array())
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
def test_table_group_by(T1):
"""
Test basic table group_by functionality for possible key types and for
masked/unmasked tables.
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# Group by a single column key specified by name
tg = t1.group_by("a")
assert np.all(tg.groups.indices == np.array([0, 1, 4, 8]))
assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>"
assert str(tg["a"].groups) == "<ColumnGroups indices=[0 1 4 8]>"
# Sorted by 'a' and in original order for rest
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a 0.0 4 4.0",
" 1 b 3.0 5 5.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 c 7.0 0 0.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 a 4.0 3 3.0",
]
assert tg.meta["ta"] == 1
assert tg["c"].meta["a"] == 1
assert tg["c"].description == "column c"
# Group by a table column
tg2 = t1.group_by(t1["a"])
assert tg.pformat() == tg2.pformat()
# Group by two columns spec'd by name
for keys in (["a", "b"], ("a", "b")):
tg = t1.group_by(keys)
assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Sorted by 'a', 'b' and in original order for rest
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a 0.0 4 4.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 1 b 3.0 5 5.0",
" 2 a 4.0 3 3.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 c 7.0 0 0.0",
]
# Group by a Table
tg2 = t1.group_by(t1["a", "b"])
assert tg.pformat() == tg2.pformat()
# Group by a structured array
tg2 = t1.group_by(t1["a", "b"].as_array())
assert tg.pformat() == tg2.pformat()
# Group by a simple ndarray
tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0]))
assert np.all(tg.groups.indices == np.array([0, 4, 7, 8]))
assert tg.pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 2 c 7.0 0 0.0",
" 2 b 6.0 2 2.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 b 5.0 1 1.0",
" 2 a 4.0 3 3.0",
" 1 b 3.0 5 5.0",
" 0 a 0.0 4 4.0",
]
def test_groups_keys(T1):
tg = T1.group_by("a")
keys = tg.groups.keys
assert keys.dtype.names == ("a",)
assert np.all(keys["a"] == np.array([0, 1, 2]))
tg = T1.group_by(["a", "b"])
keys = tg.groups.keys
assert keys.dtype.names == ("a", "b")
assert np.all(keys["a"] == np.array([0, 1, 1, 2, 2, 2]))
assert np.all(keys["b"] == np.array(["a", "a", "b", "a", "b", "c"]))
# Grouping by Column ignores column name
tg = T1.group_by(T1["b"])
keys = tg.groups.keys
assert keys.dtype.names is None
def test_groups_iterator(T1):
tg = T1.group_by("a")
for ii, group in enumerate(tg.groups):
assert group.pformat() == tg.groups[ii].pformat()
assert group["a"][0] == tg["a"][tg.groups.indices[ii]]
def test_grouped_copy(T1):
"""
Test that copying a table or column copies the groups properly
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
tg = t1.group_by("a")
tgc = tg.copy()
assert np.all(tgc.groups.indices == tg.groups.indices)
assert np.all(tgc.groups.keys == tg.groups.keys)
tac = tg["a"].copy()
assert np.all(tac.groups.indices == tg["a"].groups.indices)
c1 = t1["a"].copy()
gc1 = c1.group_by(t1["a"])
gc1c = gc1.copy()
assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8]))
def test_grouped_slicing(T1):
"""
Test that slicing a table removes previous grouping
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by("a")
tg2 = tg[3:5]
assert np.all(tg2.groups.indices == np.array([0, len(tg2)]))
assert tg2.groups.keys is None
def test_group_column_from_table(T1):
"""
Group a column that is part of a table
"""
cg = T1["c"].group_by(np.array(T1["a"]))
assert np.all(cg.groups.keys == np.array([0, 1, 2]))
assert np.all(cg.groups.indices == np.array([0, 1, 4, 8]))
def test_table_groups_mask_index(T1):
"""
Use boolean mask as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
t2 = t1.groups[np.array([True, False, True])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_table_groups_array_index(T1):
"""
Use numpy array as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
t2 = t1.groups[np.array([0, 2])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_table_groups_slicing(T1):
"""
Test that slicing table groups works
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by("a")
# slice(0, 2)
t2 = t1.groups[0:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 1]))
# slice(1, 2)
t2 = t1.groups[1:2]
assert len(t2.groups) == 1
assert t2.groups[0].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys["a"] == np.array([1]))
# slice(0, 3, 2)
t2 = t1.groups[0:3:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
def test_grouped_item_access(T1):
"""
Test that column slicing preserves grouping
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by("a")
tgs = tg["a", "c", "d"]
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
tgs = tg["c", "d"]
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [
" c d ",
"---- ---",
" 0.0 4",
" 6.0 18",
"22.0 6",
]
def test_mutable_operations(T1):
"""
    Operations like adding or deleting a row should remove grouping,
    but adding, removing, or renaming a column should retain grouping.
"""
for masked in (False, True):
t1 = QTable(T1, masked=masked)
# add row
tg = t1.group_by("a")
tg.add_row((0, "a", 3.0, 4, 4 * u.m))
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# remove row
tg = t1.group_by("a")
tg.remove_row(4)
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# add column
tg = t1.group_by("a")
indices = tg.groups.indices.copy()
tg.add_column(Column(name="e", data=np.arange(len(tg))))
assert np.all(tg.groups.indices == indices)
assert np.all(tg["e"].groups.indices == indices)
assert np.all(tg["e"].groups.keys == tg.groups.keys)
# remove column (not key column)
tg = t1.group_by("a")
tg.remove_column("b")
assert np.all(tg.groups.indices == indices)
# Still has original key col names
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["a"].groups.indices == indices)
# remove key column
tg = t1.group_by("a")
tg.remove_column("a")
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["b"].groups.indices == indices)
# rename key column
tg = t1.group_by("a")
tg.rename_column("a", "aa")
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ("a",)
assert np.all(tg["aa"].groups.indices == indices)
def test_group_by_masked(T1):
t1m = QTable(T1, masked=True)
t1m["c"].mask[4] = True
t1m["d"].mask[5] = True
assert t1m.group_by("a").pformat() == [
" a b c d q ",
" m ",
"--- --- --- --- ---",
" 0 a -- 4 4.0",
" 1 b 3.0 -- 5.0",
" 1 a 2.0 6 6.0",
" 1 a 1.0 7 7.0",
" 2 c 7.0 0 0.0",
" 2 b 5.0 1 1.0",
" 2 b 6.0 2 2.0",
" 2 a 4.0 3 3.0",
]
def test_group_by_errors(T1):
"""
Appropriate errors get raised.
"""
# Bad column name as string
with pytest.raises(ValueError):
T1.group_by("f")
# Bad column names in list
with pytest.raises(ValueError):
T1.group_by(["f", "g"])
# Wrong length array
with pytest.raises(ValueError):
T1.group_by(np.array([1, 2]))
# Wrong type
with pytest.raises(TypeError):
T1.group_by(None)
# Masked key column
t1 = QTable(T1, masked=True)
t1["a"].mask[4] = True
with pytest.raises(ValueError):
t1.group_by("a")
def test_groups_keys_meta(T1):
"""
Make sure the keys meta['grouped_by_table_cols'] is working.
"""
# Group by column in this table
tg = T1.group_by("a")
assert tg.groups.keys.meta["grouped_by_table_cols"] is True
assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is True
assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is True
assert (
tg["d"]
.groups[np.array([False, True, True])]
.groups.keys.meta["grouped_by_table_cols"]
is True
)
# Group by external Table
tg = T1.group_by(T1["a", "b"])
assert tg.groups.keys.meta["grouped_by_table_cols"] is False
assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is False
assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is False
# Group by external numpy array
tg = T1.group_by(T1["a", "b"].as_array())
assert not hasattr(tg.groups.keys, "meta")
assert not hasattr(tg["c"].groups.keys, "meta")
# Group by Column
tg = T1.group_by(T1["a"])
assert "grouped_by_table_cols" not in tg.groups.keys.meta
assert "grouped_by_table_cols" not in tg["c"].groups.keys.meta
def test_table_aggregate(T1):
"""
Aggregate a table
"""
# Table with only summable cols
t1 = T1["a", "c", "d"]
tg = t1.group_by("a")
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
# Reverts to default groups
assert np.all(tga.groups.indices == np.array([0, 3]))
assert tga.groups.keys is None
# metadata survives
assert tga.meta["ta"] == 1
assert tga["c"].meta["a"] == 1
assert tga["c"].description == "column c"
# Aggregate with np.sum with masked elements. This results
# in one group with no elements, hence a nan result and conversion
# to float for the 'd' column.
t1m = QTable(T1, masked=True)
t1m["c"].mask[4:6] = True
t1m["d"].mask[4:6] = True
tg = t1m.group_by("a")
with pytest.warns(UserWarning, match="converting a masked element to nan"):
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d q ",
" m ",
"--- ---- ---- ----",
" 0 nan nan 4.0",
" 1 3.0 13.0 18.0",
" 2 22.0 6.0 6.0",
]
# Aggregate with np.sum with masked elements, but where every
# group has at least one remaining (unmasked) element. Then
# the int column stays as an int.
t1m = QTable(t1, masked=True)
t1m["c"].mask[5] = True
t1m["d"].mask[5] = True
tg = t1m.group_by("a")
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 3.0 13",
" 2 22.0 6",
]
    # Aggregate with a column type that cannot be supplied to the aggregating
# function. This raises a warning but still works.
tg = T1.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"):
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [
" a c d q ",
" m ",
"--- ---- --- ----",
" 0 0.0 4 4.0",
" 1 6.0 18 18.0",
" 2 22.0 6 6.0",
]
def test_table_aggregate_reduceat(T1):
"""
Aggregate table with functions which have a reduceat method
"""
# Comparison functions without reduceat
def np_mean(x):
return np.mean(x)
def np_sum(x):
return np.sum(x)
def np_add(x):
return np.add(x)
# Table with only summable cols
t1 = T1["a", "c", "d"]
tg = t1.group_by("a")
# Comparison
tga_r = tg.groups.aggregate(np.sum)
tga_a = tg.groups.aggregate(np.add)
tga_n = tg.groups.aggregate(np_sum)
assert np.all(tga_r == tga_n)
assert np.all(tga_a == tga_n)
assert tga_n.pformat() == [
" a c d ",
"--- ---- ---",
" 0 0.0 4",
" 1 6.0 18",
" 2 22.0 6",
]
tga_r = tg.groups.aggregate(np.mean)
tga_n = tg.groups.aggregate(np_mean)
assert np.all(tga_r == tga_n)
assert tga_n.pformat() == [
" a c d ",
"--- --- ---",
" 0 0.0 4.0",
" 1 2.0 6.0",
" 2 5.5 1.5",
]
# Binary ufunc np_add should raise warning without reduceat
t2 = T1["a", "c"]
tg = t2.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"):
tga = tg.groups.aggregate(np_add)
assert tga.pformat() == [" a ", "---", " 0", " 1", " 2"]
def test_column_aggregate(T1):
"""
Aggregate a single table column
"""
for masked in (False, True):
tg = QTable(T1, masked=masked).group_by("a")
tga = tg["c"].groups.aggregate(np.sum)
assert tga.pformat() == [" c ", "----", " 0.0", " 6.0", "22.0"]
@pytest.mark.skipif(
not NUMPY_LT_1_22 and NUMPY_LT_1_22_1,
reason="https://github.com/numpy/numpy/issues/20699",
)
def test_column_aggregate_f8():
"""https://github.com/astropy/astropy/issues/12706"""
# Just want to make sure it does not crash again.
for masked in (False, True):
tg = Table({"a": np.arange(2, dtype=">f8")}, masked=masked).group_by("a")
tga = tg["a"].groups.aggregate(np.sum)
assert tga.pformat() == [" a ", "---", "0.0", "1.0"]
def test_table_filter():
"""
Table groups filtering
"""
def all_positive(table, key_colnames):
colnames = [name for name in table.colnames if name not in key_colnames]
for colname in colnames:
if np.any(table[colname] < 0):
return False
return True
# Negative value in 'a' column should not filter because it is a key col
t = Table.read(
[
" a c d",
" -2 7.0 0",
" -2 5.0 1",
" 0 0.0 4",
" 1 3.0 5",
" 1 2.0 -6",
" 1 1.0 7",
" 3 3.0 5",
" 3 -2.0 6",
" 3 1.0 7",
],
format="ascii",
)
tg = t.group_by("a")
t2 = tg.groups.filter(all_positive)
assert t2.groups[0].pformat() == [
" a c d ",
"--- --- ---",
" -2 7.0 0",
" -2 5.0 1",
]
assert t2.groups[1].pformat() == [" a c d ", "--- --- ---", " 0 0.0 4"]
def test_column_filter():
"""
    Column groups filtering
"""
def all_positive(column):
if np.any(column < 0):
return False
return True
# Negative value in 'a' column should not filter because it is a key col
t = Table.read(
[
" a c d",
" -2 7.0 0",
" -2 5.0 1",
" 0 0.0 4",
" 1 3.0 5",
" 1 2.0 -6",
" 1 1.0 7",
" 3 3.0 5",
" 3 -2.0 6",
" 3 1.0 7",
],
format="ascii",
)
tg = t.group_by("a")
c2 = tg["c"].groups.filter(all_positive)
assert len(c2.groups) == 3
assert c2.groups[0].pformat() == [" c ", "---", "7.0", "5.0"]
assert c2.groups[1].pformat() == [" c ", "---", "0.0"]
assert c2.groups[2].pformat() == [" c ", "---", "3.0", "2.0", "1.0"]
def test_group_mixins():
"""
Test grouping a table with mixin columns
"""
# Setup mixins
idx = np.arange(4)
x = np.array([3.0, 1.0, 2.0, 1.0])
q = x * u.m
lon = coordinates.Longitude(x * u.deg)
lat = coordinates.Latitude(x * u.deg)
# For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision)
tm = time.Time(2000, format="jyear") + time.TimeDelta(x * 1e-10, format="sec")
sc = coordinates.SkyCoord(ra=lon, dec=lat)
aw = table_helpers.ArrayWrapper(x)
nd = np.array([(3, "c"), (1, "a"), (2, "b"), (1, "a")], dtype="<i4,|S1").view(
NdarrayMixin
)
qt = QTable(
[idx, x, q, lon, lat, tm, sc, aw, nd],
names=["idx", "x", "q", "lon", "lat", "tm", "sc", "aw", "nd"],
)
# Test group_by with each supported mixin type
mixin_keys = ["x", "q", "lon", "lat", "tm", "sc", "aw", "nd"]
for key in mixin_keys:
qtg = qt.group_by(key)
# Test that it got the sort order correct
assert np.all(qtg["idx"] == [1, 3, 2, 0])
# Test that the groups are right
# Note: skip testing SkyCoord column because that doesn't have equality
for name in ["x", "q", "lon", "lat", "tm", "aw", "nd"]:
assert np.all(qt[name][[1, 3]] == qtg.groups[0][name])
assert np.all(qt[name][[2]] == qtg.groups[1][name])
assert np.all(qt[name][[0]] == qtg.groups[2][name])
    # Test that unique also works with mixins since most of the work is
    # done with group_by(). This uses *every* mixin column as a key.
uqt = unique(qt, keys=mixin_keys)
assert len(uqt) == 3
assert np.all(uqt["idx"] == [1, 2, 0])
assert np.all(uqt["x"] == [1.0, 2.0, 3.0])
# Column group_by() with mixins
idxg = qt["idx"].group_by(qt[mixin_keys])
assert np.all(idxg == [1, 3, 2, 0])
@pytest.mark.parametrize(
"col",
[
time.TimeDelta([1, 2], format="sec"),
time.Time([1, 2], format="cxcsec"),
coordinates.SkyCoord([1, 2], [3, 4], unit="deg,deg"),
],
)
def test_group_mixins_unsupported(col):
"""Test that aggregating unsupported mixins produces a warning only"""
t = Table([[1, 1], [3, 4], col], names=["a", "b", "mix"])
tg = t.group_by("a")
with pytest.warns(AstropyUserWarning, match="Cannot aggregate column 'mix'"):
tg.groups.aggregate(np.sum)
|
6cb401fc3c6508ba50f23386f8feb610d9c5470df79e6f560e626b522391f94a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test sky projections defined in WCS Paper II"""
# pylint: disable=invalid-name, no-member
import os
import unittest.mock as mk
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal
from astropy import units as u
from astropy import wcs
from astropy.io import fits
from astropy.modeling import projections
from astropy.modeling.parameters import InputParameterError
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import get_pkg_data_filename
def test_new_wcslib_projections():
# Test that we are aware of all WCSLIB projections.
# Detect if a new WCSLIB release introduced new projections.
assert not set(wcs.PRJ_CODES).symmetric_difference(
projections.projcodes + projections._NOT_SUPPORTED_PROJ_CODES
)
def test_Projection_properties():
projection = projections.Sky2Pix_PlateCarree()
assert projection.n_inputs == 2
assert projection.n_outputs == 2
PIX_COORDINATES = [-10, 30]
MAPS_DIR = os.path.join(os.pardir, os.pardir, "wcs", "tests", "data", "maps")
pars = [(x,) for x in projections.projcodes]
# There is no ground-truth file for the XPH projection available here:
# https://www.atnf.csiro.au/people/mcalabre/WCS/example_data.html
pars.remove(("XPH",))
@pytest.mark.parametrize(("code",), pars)
def test_Sky2Pix(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = f"PV2_{i + 1}"
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0.0, 0.0]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_pix = w.wcs.s2p(wcslibout["world"], 1)["pixcrd"]
model = getattr(projections, "Sky2Pix_" + code)
tinv = model(*params)
x, y = tinv(wcslibout["phi"], wcslibout["theta"])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
assert isinstance(tinv.prjprm, wcs.Prjprm)
@pytest.mark.parametrize(("code",), pars)
def test_Pix2Sky(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = f"PV2_{i + 1}"
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0.0, 0.0]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_phi = wcslibout["phi"]
wcs_theta = wcslibout["theta"]
model = getattr(projections, "Pix2Sky_" + code)
tanprj = model(*params)
phi, theta = tanprj(*PIX_COORDINATES)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
@pytest.mark.parametrize(("code",), pars)
def test_Sky2Pix_unit(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = f"PV2_{i + 1}"
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0.0, 0.0]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_pix = w.wcs.s2p(wcslibout["world"], 1)["pixcrd"]
model = getattr(projections, "Sky2Pix_" + code)
tinv = model(*params)
x, y = tinv(wcslibout["phi"] * u.deg, wcslibout["theta"] * u.deg)
assert_quantity_allclose(x, wcs_pix[:, 0] * u.deg)
assert_quantity_allclose(y, wcs_pix[:, 1] * u.deg)
@pytest.mark.parametrize(("code",), pars)
def test_Pix2Sky_unit(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = f"PV2_{i + 1}"
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0.0, 0.0]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_phi = wcslibout["phi"]
wcs_theta = wcslibout["theta"]
model = getattr(projections, "Pix2Sky_" + code)
tanprj = model(*params)
phi, theta = tanprj(*PIX_COORDINATES * u.deg)
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.rad))
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.arcmin))
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
@pytest.mark.parametrize(("code",), pars)
def test_projection_default(code):
"""Check astropy model eval with default parameters"""
# Just makes sure that the default parameter values are reasonable
# and accepted by wcslib.
model = getattr(projections, "Sky2Pix_" + code)
tinv = model()
x, y = tinv(45, 45)
model = getattr(projections, "Pix2Sky_" + code)
tinv = model()
x, y = tinv(0, 0)
class TestZenithalPerspective:
"""Test Zenithal Perspective projection"""
def setup_class(self):
ID = "AZP"
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{ID}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
self.wazp = wcs.WCS(header)
self.wazp.wcs.crpix = np.array([0.0, 0.0])
self.wazp.wcs.crval = np.array([0.0, 0.0])
self.wazp.wcs.cdelt = np.array([1.0, 1.0])
self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
self.azp = projections.Pix2Sky_ZenithalPerspective(*self.pv_kw)
def test_AZP_p2s(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_phi = wcslibout["phi"]
wcs_theta = wcslibout["theta"]
phi, theta = self.azp(-10, 30)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
def test_AZP_s2p(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_pix = self.wazp.wcs.s2p(wcslibout["world"], 1)["pixcrd"]
x, y = self.azp.inverse(wcslibout["phi"], wcslibout["theta"])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
def test_validate(self):
MESSAGE = r"Zenithal perspective projection is not defined for mu = -1"
with pytest.raises(InputParameterError, match=MESSAGE):
projections.Pix2Sky_ZenithalPerspective(-1)
with pytest.raises(InputParameterError, match=MESSAGE):
projections.Sky2Pix_ZenithalPerspective(-1)
with pytest.raises(InputParameterError, match=MESSAGE):
projections.Pix2Sky_SlantZenithalPerspective(-1)
with pytest.raises(InputParameterError, match=MESSAGE):
projections.Sky2Pix_SlantZenithalPerspective(-1)
class TestCylindricalPerspective:
"""Test cylindrical perspective projection"""
def setup_class(self):
ID = "CYP"
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{ID}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
self.wazp = wcs.WCS(header)
self.wazp.wcs.crpix = np.array([0.0, 0.0])
self.wazp.wcs.crval = np.array([0.0, 0.0])
self.wazp.wcs.cdelt = np.array([1.0, 1.0])
self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
self.azp = projections.Pix2Sky_CylindricalPerspective(*self.pv_kw)
def test_CYP_p2s(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_phi = wcslibout["phi"]
wcs_theta = wcslibout["theta"]
phi, theta = self.azp(-10, 30)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
def test_CYP_s2p(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_pix = self.wazp.wcs.s2p(wcslibout["world"], 1)["pixcrd"]
x, y = self.azp.inverse(wcslibout["phi"], wcslibout["theta"])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
def test_validate(self):
MESSAGE = r"CYP projection is not defined for .*"
MESSAGE0 = r"CYP projection is not defined for mu = -lambda"
MESSAGE1 = r"CYP projection is not defined for lambda = -mu"
# Pix2Sky_CylindricalPerspective
with pytest.raises(InputParameterError, match=MESSAGE):
projections.Pix2Sky_CylindricalPerspective(1, -1)
with pytest.raises(InputParameterError, match=MESSAGE):
projections.Pix2Sky_CylindricalPerspective(-1, 1)
model = projections.Pix2Sky_CylindricalPerspective()
with pytest.raises(InputParameterError, match=MESSAGE0):
model.mu = -1
with pytest.raises(InputParameterError, match=MESSAGE1):
model.lam = -1
# Sky2Pix_CylindricalPerspective
with pytest.raises(InputParameterError, match=MESSAGE):
projections.Sky2Pix_CylindricalPerspective(1, -1)
with pytest.raises(InputParameterError, match=MESSAGE):
projections.Sky2Pix_CylindricalPerspective(-1, 1)
model = projections.Sky2Pix_CylindricalPerspective()
with pytest.raises(InputParameterError, match=MESSAGE0):
model.mu = -1
with pytest.raises(InputParameterError, match=MESSAGE1):
model.lam = -1
def test_AffineTransformation2D():
# Simple test with a scale and translation
model = projections.AffineTransformation2D(
matrix=[[2, 0], [0, 2]], translation=[1, 1]
)
# Coordinates for vertices of a rectangle
rect = [[0, 0], [1, 0], [0, 3], [1, 3]]
x, y = zip(*rect)
new_rect = np.vstack(model(x, y)).T
assert np.all(new_rect == [[1, 1], [3, 1], [1, 7], [3, 7]])
# Matrix validation error
MESSAGE = r"Expected transformation matrix to be a 2x2 array"
with pytest.raises(InputParameterError, match=MESSAGE):
model.matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# Translation validation error
MESSAGE = (
r"Expected translation vector to be a 2 element row or column vector array"
)
with pytest.raises(InputParameterError, match=MESSAGE):
model.translation = [1, 2, 3]
with pytest.raises(InputParameterError, match=MESSAGE):
model.translation = [[1], [2]]
with pytest.raises(InputParameterError, match=MESSAGE):
model.translation = [[1, 2, 3]]
# Incompatible shape error
a = np.array([[1], [2], [3], [4]])
b = a.ravel()
with mk.patch.object(np, "vstack", autospec=True, side_effect=[a, b]) as mk_vstack:
MESSAGE = r"Incompatible input shapes"
with pytest.raises(ValueError, match=MESSAGE):
model(x, y)
with pytest.raises(ValueError, match=MESSAGE):
model(x, y)
assert mk_vstack.call_count == 2
# Input shape evaluation error
x = np.array([1, 2])
y = np.array([1, 2, 3])
MESSAGE = r"Expected input arrays to have the same shape"
with pytest.raises(ValueError, match=MESSAGE):
model.evaluate(x, y, model.matrix, model.translation)
def test_AffineTransformation2D_inverse():
# Test non-invertible model
model1 = projections.AffineTransformation2D(matrix=[[1, 1], [1, 1]])
MESSAGE = r"Transformation matrix is singular; .* model does not have an inverse"
with pytest.raises(InputParameterError, match=MESSAGE):
model1.inverse
model2 = projections.AffineTransformation2D(
matrix=[[1.2, 3.4], [5.6, 7.8]], translation=[9.1, 10.11]
)
# Coordinates for vertices of a rectangle
rect = [[0, 0], [1, 0], [0, 3], [1, 3]]
x, y = zip(*rect)
x_new, y_new = model2.inverse(*model2(x, y))
assert_allclose([x, y], [x_new, y_new], atol=1e-10)
model3 = projections.AffineTransformation2D(
matrix=[[1.2, 3.4], [5.6, 7.8]] * u.m, translation=[9.1, 10.11] * u.m
)
x_new, y_new = model3.inverse(*model3(x * u.m, y * u.m))
assert_allclose([x, y], [x_new, y_new], atol=1e-10)
model4 = projections.AffineTransformation2D(
matrix=[[1.2, 3.4], [5.6, 7.8]] * u.m, translation=[9.1, 10.11] * u.km
)
MESSAGE = r"matrix and translation must have the same units"
with pytest.raises(ValueError, match=MESSAGE):
model4.inverse(*model4(x * u.m, y * u.m))
def test_c_projection_striding():
# This is just a simple test to make sure that the striding is
# handled correctly in the projection C extension
coords = np.arange(10).reshape((5, 2))
model = projections.Sky2Pix_ZenithalPerspective(2, 30)
phi, theta = model(coords[:, 0], coords[:, 1])
assert_almost_equal(phi, [0.0, 2.2790416, 4.4889294, 6.6250643, 8.68301])
assert_almost_equal(
theta, [-76.4816918, -75.3594654, -74.1256332, -72.784558, -71.3406629]
)
def test_c_projections_shaped():
nx, ny = (5, 2)
x = np.linspace(0, 1, nx)
y = np.linspace(0, 1, ny)
xv, yv = np.meshgrid(x, y)
model = projections.Pix2Sky_TAN()
phi, theta = model(xv, yv)
assert_allclose(
phi,
[
[0.0, 90.0, 90.0, 90.0, 90.0],
[180.0, 165.96375653, 153.43494882, 143.13010235, 135.0],
],
)
assert_allclose(
theta,
[
[90.0, 89.75000159, 89.50001269, 89.25004283, 89.00010152],
[89.00010152, 88.96933478, 88.88210788, 88.75019826, 88.58607353],
],
)
def test_affine_with_quantities():
x = 1
y = 2
xdeg = (x * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
ydeg = (y * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
xpix = x * u.pix
ypix = y * u.pix
# test affine with matrix only
qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg)
MESSAGE = (
r"To use AffineTransformation with quantities, both matrix and unit need to be"
r" quantities"
)
with pytest.raises(ValueError, match=MESSAGE):
qx1, qy1 = qaff(
xpix,
ypix,
equivalencies={
"x": u.pixel_scale(2.5 * u.deg / u.pix),
"y": u.pixel_scale(2.5 * u.deg / u.pix),
},
)
# test affine with matrix and translation
qaff = projections.AffineTransformation2D(
matrix=[[1, 2], [2, 1]] * u.deg, translation=[1, 2] * u.deg
)
qx1, qy1 = qaff(
xpix,
ypix,
equivalencies={
"x": u.pixel_scale(2.5 * u.deg / u.pix),
"y": u.pixel_scale(2.5 * u.deg / u.pix),
},
)
aff = projections.AffineTransformation2D(
matrix=[[1, 2], [2, 1]], translation=[1, 2]
)
x1, y1 = aff(xdeg.value, ydeg.value)
assert_quantity_allclose(qx1, x1 * u.deg)
assert_quantity_allclose(qy1, y1 * u.deg)
# test the case of WCS PC and CDELT transformations
pc = np.array(
[
[0.86585778922708, 0.50029020461607],
[-0.50029020461607, 0.86585778922708],
]
)
cdelt = np.array(
[
[1, 3.0683055555556e-05],
[3.0966944444444e-05, 1],
]
)
matrix = cdelt * pc
qaff = projections.AffineTransformation2D(
matrix=matrix * u.deg, translation=[0, 0] * u.deg
)
inv_matrix = np.linalg.inv(matrix)
inv_qaff = projections.AffineTransformation2D(
matrix=inv_matrix * u.pix, translation=[0, 0] * u.pix
)
qaff.inverse = inv_qaff
qx1, qy1 = qaff(
xpix,
ypix,
equivalencies={
"x": u.pixel_scale(1 * u.deg / u.pix),
"y": u.pixel_scale(1 * u.deg / u.pix),
},
)
x1, y1 = qaff.inverse(
qx1,
qy1,
equivalencies={
"x": u.pixel_scale(1 * u.deg / u.pix),
"y": u.pixel_scale(1 * u.deg / u.pix),
},
)
assert_quantity_allclose(x1, xpix)
assert_quantity_allclose(y1, ypix)
def test_Pix2Sky_ZenithalPerspective_inverse():
model = projections.Pix2Sky_ZenithalPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ZenithalPerspective)
assert inverse.mu == model.mu == 2
assert_allclose(inverse.gamma, model.gamma)
assert_allclose(inverse.gamma, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ZenithalPerspective_inverse():
model = projections.Sky2Pix_ZenithalPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_AZP)
assert inverse.mu == model.mu == 2
assert_allclose(inverse.gamma, model.gamma)
assert_allclose(inverse.gamma, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_SlantZenithalPerspective_inverse():
model = projections.Pix2Sky_SlantZenithalPerspective(2, 30, 40)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_SlantZenithalPerspective)
assert inverse.mu == model.mu == 2
assert_allclose(inverse.phi0, model.phi0)
assert_allclose(inverse.theta0, model.theta0)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_SlantZenithalPerspective_inverse():
model = projections.Sky2Pix_SlantZenithalPerspective(2, 30, 40)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_SlantZenithalPerspective)
assert inverse.mu == model.mu == 2
assert_allclose(inverse.phi0, model.phi0)
assert_allclose(inverse.theta0, model.theta0)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Gnomonic_inverse():
model = projections.Pix2Sky_Gnomonic()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Gnomonic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Gnomonic_inverse():
model = projections.Sky2Pix_Gnomonic()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Gnomonic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Stereographic_inverse():
model = projections.Pix2Sky_Stereographic()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Stereographic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Stereographic_inverse():
model = projections.Sky2Pix_Stereographic()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Stereographic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_SlantOrthographic_inverse():
model = projections.Pix2Sky_SlantOrthographic(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_SlantOrthographic)
assert inverse.xi == model.xi == 2
assert inverse.eta == model.eta == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-8)
assert_allclose(b, y, atol=1e-8)
def test_Sky2Pix_SlantOrthographic_inverse():
model = projections.Sky2Pix_SlantOrthographic(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_SlantOrthographic)
assert inverse.xi == model.xi == 2
assert inverse.eta == model.eta == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-8)
assert_allclose(b, y, atol=1e-8)
def test_Pix2Sky_ZenithalEquidistant_inverse():
model = projections.Pix2Sky_ZenithalEquidistant()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ZenithalEquidistant)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ZenithalEquidistant_inverse():
model = projections.Sky2Pix_ZenithalEquidistant()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ZenithalEquidistant)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_ZenithalEqualArea_inverse():
model = projections.Pix2Sky_ZenithalEqualArea()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ZenithalEqualArea)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ZenithalEqualArea_inverse():
model = projections.Sky2Pix_ZenithalEqualArea()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ZenithalEqualArea)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Airy_inverse():
model = projections.Pix2Sky_Airy(30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Airy)
assert inverse.theta_b == model.theta_b == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Airy_inverse():
model = projections.Sky2Pix_Airy(30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Airy)
assert inverse.theta_b == model.theta_b == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_CylindricalPerspective_inverse():
model = projections.Pix2Sky_CylindricalPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_CylindricalPerspective)
assert inverse.mu == model.mu == 2
assert inverse.lam == model.lam == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_CylindricalPerspective_inverse():
model = projections.Sky2Pix_CylindricalPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_CylindricalPerspective)
assert inverse.mu == model.mu == 2
assert inverse.lam == model.lam == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_CylindricalEqualArea_inverse():
model = projections.Pix2Sky_CylindricalEqualArea(0.567)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_CylindricalEqualArea)
assert inverse.lam == model.lam == 0.567
def test_Sky2Pix_CylindricalEqualArea_inverse():
model = projections.Sky2Pix_CylindricalEqualArea(0.765)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_CylindricalEqualArea)
assert inverse.lam == model.lam == 0.765
def test_Pix2Sky_PlateCarree_inverse():
model = projections.Pix2Sky_PlateCarree()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_PlateCarree)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_PlateCarree_inverse():
model = projections.Sky2Pix_PlateCarree()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_PlateCarree)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Mercator_inverse():
model = projections.Pix2Sky_Mercator()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Mercator)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Mercator_inverse():
model = projections.Sky2Pix_Mercator()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Mercator)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_SansonFlamsteed_inverse():
model = projections.Pix2Sky_SansonFlamsteed()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_SansonFlamsteed)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_SansonFlamsteed_inverse():
model = projections.Sky2Pix_SansonFlamsteed()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_SansonFlamsteed)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Parabolic_inverse():
model = projections.Pix2Sky_Parabolic()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Parabolic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Parabolic_inverse():
model = projections.Sky2Pix_Parabolic()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Parabolic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Molleweide_inverse():
model = projections.Pix2Sky_Molleweide()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Molleweide)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Molleweide_inverse():
model = projections.Sky2Pix_Molleweide()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Molleweide)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_HammerAitoff_inverse():
model = projections.Pix2Sky_HammerAitoff()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_HammerAitoff)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_HammerAitoff_inverse():
model = projections.Sky2Pix_HammerAitoff()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_HammerAitoff)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_ConicPerspective_inverse():
model = projections.Pix2Sky_ConicPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ConicPerspective)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ConicPerspective_inverse():
model = projections.Sky2Pix_ConicPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ConicPerspective)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_ConicEqualArea_inverse():
model = projections.Pix2Sky_ConicEqualArea(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ConicEqualArea)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ConicEqualArea_inverse():
model = projections.Sky2Pix_ConicEqualArea(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ConicEqualArea)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_ConicEquidistant_inverse():
model = projections.Pix2Sky_ConicEquidistant(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ConicEquidistant)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ConicEquidistant_inverse():
model = projections.Sky2Pix_ConicEquidistant(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ConicEquidistant)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_ConicOrthomorphic_inverse():
model = projections.Pix2Sky_ConicOrthomorphic(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ConicOrthomorphic)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ConicOrthomorphic_inverse():
model = projections.Sky2Pix_ConicOrthomorphic(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ConicOrthomorphic)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_BonneEqualArea_inverse():
model = projections.Pix2Sky_BonneEqualArea(2)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_BonneEqualArea)
assert inverse.theta1 == model.theta1 == 2
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_BonneEqualArea_inverse():
model = projections.Sky2Pix_BonneEqualArea(2)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_BonneEqualArea)
assert inverse.theta1 == model.theta1 == 2
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Polyconic_inverse():
model = projections.Pix2Sky_Polyconic()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Polyconic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Polyconic_inverse():
model = projections.Sky2Pix_Polyconic()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Polyconic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_TangentialSphericalCube_inverse():
model = projections.Pix2Sky_TangentialSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_TangentialSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_TangentialSphericalCube_inverse():
model = projections.Sky2Pix_TangentialSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_TangentialSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_COBEQuadSphericalCube_inverse():
model = projections.Pix2Sky_COBEQuadSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_COBEQuadSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-3)
assert_allclose(b, y, atol=1e-3)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-3)
assert_allclose(b, y, atol=1e-3)
def test_Sky2Pix_COBEQuadSphericalCube_inverse():
model = projections.Sky2Pix_COBEQuadSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_COBEQuadSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-3)
assert_allclose(b, y, atol=1e-3)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-3)
assert_allclose(b, y, atol=1e-3)
def test_Pix2Sky_QuadSphericalCube_inverse():
model = projections.Pix2Sky_QuadSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_QuadSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_QuadSphericalCube_inverse():
model = projections.Sky2Pix_QuadSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_QuadSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_HEALPix_inverse():
model = projections.Pix2Sky_HEALPix(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_HEALPix)
assert inverse.H == model.H == 2
assert inverse.X == model.X == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_HEALPix_inverse():
model = projections.Sky2Pix_HEALPix(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_HEALPix)
assert inverse.H == model.H == 2
assert inverse.X == model.X == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_HEALPixPolar_inverse():
model = projections.Pix2Sky_HEALPixPolar()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_HEALPixPolar)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_HEALPixPolar_inverse():
model = projections.Sky2Pix_HEALPixPolar()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_HEALPixPolar)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
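# --- Illustrative sketch (not part of the original astropy test module) ---
# The parameterless Pix2Sky/Sky2Pix inverse tests above all repeat the same
# round-trip check. Assuming only projection classes already exercised above,
# the pattern could be collapsed into one parametrized test; the test name
# below is hypothetical.
import numpy as np
import pytest
from numpy.testing import assert_allclose

from astropy.modeling import projections


@pytest.mark.parametrize(
    "model_cls",
    [
        projections.Pix2Sky_PlateCarree,
        projections.Sky2Pix_PlateCarree,
        projections.Pix2Sky_Mercator,
        projections.Sky2Pix_Mercator,
    ],
)
def test_parameterless_inverse_roundtrip_sketch(model_cls):
    # Round-trip through the projection and its inverse in both orders.
    model = model_cls()
    inverse = model.inverse
    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)
    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)
    a, b = inverse(*model(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)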
|
86425870befb9805bc86904dab58f0964082f5555844bcdf3f2f629f80da8b58 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import unittest.mock as mk
import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import SpectralCoord
from astropy.modeling.bounding_box import (
CompoundBoundingBox,
ModelBoundingBox,
_BaseInterval,
_BaseSelectorArgument,
_BoundingDomain,
_ignored_interval,
_Interval,
_SelectorArgument,
_SelectorArguments,
)
from astropy.modeling.core import Model, fix_inputs
from astropy.modeling.models import (
Gaussian1D,
Gaussian2D,
Identity,
Polynomial2D,
Scale,
Shift,
)
class Test_Interval:
def test_create(self):
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
assert isinstance(interval, _BaseInterval)
assert interval.lower == lower
assert interval.upper == upper
assert interval == (lower, upper)
assert interval.__repr__() == f"Interval(lower={lower}, upper={upper})"
def test_copy(self):
interval = _Interval(0.5, 1.5)
copy = interval.copy()
assert interval == copy
assert id(interval) != id(copy)
# Same float values will have the same id
assert interval.lower == copy.lower
assert id(interval.lower) == id(copy.lower)
# Same float values will have the same id
assert interval.upper == copy.upper
assert id(interval.upper) == id(copy.upper)
def test__validate_shape(self):
MESSAGE = r"An interval must be some sort of sequence of length 2"
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
# Passes (2,)
interval._validate_shape((1, 2))
interval._validate_shape([1, 2])
interval._validate_shape((1 * u.m, 2 * u.m))
interval._validate_shape([1 * u.m, 2 * u.m])
# Passes (1, 2)
interval._validate_shape(((1, 2),))
interval._validate_shape(([1, 2],))
interval._validate_shape([(1, 2)])
interval._validate_shape([[1, 2]])
interval._validate_shape(((1 * u.m, 2 * u.m),))
interval._validate_shape(([1 * u.m, 2 * u.m],))
interval._validate_shape([(1 * u.m, 2 * u.m)])
interval._validate_shape([[1 * u.m, 2 * u.m]])
# Passes (2, 0)
interval._validate_shape((mk.MagicMock(), mk.MagicMock()))
interval._validate_shape([mk.MagicMock(), mk.MagicMock()])
# Passes with array inputs:
interval._validate_shape((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))
interval._validate_shape(
(np.array([-2.5, -3.5, -4.5]), np.array([2.5, 3.5, 4.5]))
)
# Fails shape (no units)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape((1, 2, 3))
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape([1, 2, 3])
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(1)
# Fails shape (units)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape((1 * u.m, 2 * u.m, 3 * u.m))
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape([1 * u.m, 2 * u.m, 3 * u.m])
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(
[[1 * u.m, 2 * u.m, 3 * u.m], [4 * u.m, 5 * u.m, 6 * u.m]]
)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(1 * u.m)
# Fails shape (arrays):
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape(
(np.array([-2.5, -3.5]), np.array([2.5, 3.5]), np.array([3, 4]))
)
with pytest.raises(ValueError, match=MESSAGE):
interval._validate_shape((np.array([-2.5, -3.5]), [2.5, 3.5]))
def test__validate_bounds(self):
# Passes
assert _Interval._validate_bounds(1, 2) == (1, 2)
assert _Interval._validate_bounds(1 * u.m, 2 * u.m) == (1 * u.m, 2 * u.m)
interval = _Interval._validate_bounds(
np.array([-2.5, -3.5]), np.array([2.5, 3.5])
)
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
# Fails
with pytest.warns(
RuntimeWarning,
match=r"Invalid interval: upper bound 1 is strictly "
r"less than lower bound 2\.",
):
_Interval._validate_bounds(2, 1)
with pytest.warns(
RuntimeWarning,
match=r"Invalid interval: upper bound 1\.0 m is strictly "
r"less than lower bound 2\.0 m\.",
):
_Interval._validate_bounds(2 * u.m, 1 * u.m)
def test_validate(self):
# Passes
assert _Interval.validate((1, 2)) == (1, 2)
assert _Interval.validate([1, 2]) == (1, 2)
assert _Interval.validate((1 * u.m, 2 * u.m)) == (1 * u.m, 2 * u.m)
assert _Interval.validate([1 * u.m, 2 * u.m]) == (1 * u.m, 2 * u.m)
assert _Interval.validate(((1, 2),)) == (1, 2)
assert _Interval.validate(([1, 2],)) == (1, 2)
assert _Interval.validate([(1, 2)]) == (1, 2)
assert _Interval.validate([[1, 2]]) == (1, 2)
assert _Interval.validate(((1 * u.m, 2 * u.m),)) == (1 * u.m, 2 * u.m)
assert _Interval.validate(([1 * u.m, 2 * u.m],)) == (1 * u.m, 2 * u.m)
assert _Interval.validate([(1 * u.m, 2 * u.m)]) == (1 * u.m, 2 * u.m)
assert _Interval.validate([[1 * u.m, 2 * u.m]]) == (1 * u.m, 2 * u.m)
interval = _Interval.validate((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
interval = _Interval.validate(
(np.array([-2.5, -3.5, -4.5]), np.array([2.5, 3.5, 4.5]))
)
assert (interval.lower == np.array([-2.5, -3.5, -4.5])).all()
assert (interval.upper == np.array([2.5, 3.5, 4.5])).all()
# Fail shape
MESSAGE = r"An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError, match=MESSAGE):
_Interval.validate((1, 2, 3))
# Fail bounds
with pytest.warns(RuntimeWarning):
_Interval.validate((2, 1))
def test_outside(self):
interval = _Interval.validate((0, 1))
# fmt: off
assert (
interval.outside(np.linspace(-1, 2, 13))
== [
True, True, True, True,
False, False, False, False, False,
True, True, True, True
]
).all()
# fmt: on
def test_domain(self):
interval = _Interval.validate((0, 1))
assert (interval.domain(0.25) == np.linspace(0, 1, 5)).all()
def test__ignored_interval(self):
assert _ignored_interval.lower == -np.inf
assert _ignored_interval.upper == np.inf
for num in [0, -1, -100, 3.14, 10**100, -(10**100)]:
assert not num < _ignored_interval[0]
assert num > _ignored_interval[0]
assert not num > _ignored_interval[1]
assert num < _ignored_interval[1]
assert not (_ignored_interval.outside(np.array([num]))).all()
def test_validate_with_SpectralCoord(self):
"""Regression test for issue #12439"""
lower = SpectralCoord(1, u.um)
upper = SpectralCoord(10, u.um)
interval = _Interval.validate((lower, upper))
assert interval.lower == lower
assert interval.upper == upper
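# --- Illustrative sketch (not part of the original astropy test module) ---
# Minimal standalone usage of the private _Interval helper exercised by
# Test_Interval above. It relies only on this module's imports (np, _Interval);
# the helper name below is hypothetical and not collected by pytest.
def _interval_usage_sketch():
    # Tuples, lists and singly nested pairs all normalize to an _Interval.
    interval = _Interval.validate((0, 1))
    assert interval.lower == 0 and interval.upper == 1
    # outside() flags points that fall outside the closed interval.
    assert interval.outside(np.array([-0.5, 0.5, 1.5])).tolist() == [True, False, True]
    # domain() samples the interval at the requested resolution.
    assert (interval.domain(0.25) == np.linspace(0, 1, 5)).all()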
class Test_BoundingDomain:
def setup_method(self):
class BoundingDomain(_BoundingDomain):
def fix_inputs(self, model, fix_inputs):
super().fix_inputs(model, fixed_inputs=fix_inputs)
def prepare_inputs(self, input_shape, inputs):
super().prepare_inputs(input_shape, inputs)
self.BoundingDomain = BoundingDomain
def test_create(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "C"
bounding_box = self.BoundingDomain(model, order="F")
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "F"
bounding_box = self.BoundingDomain(Gaussian2D(), ["x"])
assert bounding_box._ignored == [0]
assert bounding_box._order == "C"
# Error
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
self.BoundingDomain(model, order=mk.MagicMock())
def test_model(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box.model == model
def test_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock(), order="C")
assert bounding_box._order == "C"
assert bounding_box.order == "C"
bounding_box = self.BoundingDomain(mk.MagicMock(), order="F")
assert bounding_box._order == "F"
assert bounding_box.order == "F"
bounding_box._order = "test"
assert bounding_box.order == "test"
def test_ignored(self):
ignored = [0]
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = self.BoundingDomain(model, ignored=ignored)
assert bounding_box._ignored == ignored
assert bounding_box.ignored == ignored
def test__get_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Success (default 'C')
assert bounding_box._order == "C"
assert bounding_box._get_order() == "C"
assert bounding_box._get_order("C") == "C"
assert bounding_box._get_order("F") == "F"
# Success (default 'F')
bounding_box._order = "F"
assert bounding_box._order == "F"
assert bounding_box._get_order() == "F"
assert bounding_box._get_order("C") == "C"
assert bounding_box._get_order("F") == "F"
# Error
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_order(mk.MagicMock())
def test__get_index(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass input name
assert bounding_box._get_index("x") == 0
assert bounding_box._get_index("y") == 1
# Pass invalid input name
MESSAGE = r"'z' is not one of the inputs: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_index("z")
# Pass valid index
assert bounding_box._get_index(0) == 0
assert bounding_box._get_index(1) == 1
assert bounding_box._get_index(np.int32(0)) == 0
assert bounding_box._get_index(np.int32(1)) == 1
assert bounding_box._get_index(np.int64(0)) == 0
assert bounding_box._get_index(np.int64(1)) == 1
# Pass invalid index
MESSAGE = r"Integer key: .* must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(2)
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(np.int32(2))
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(np.int64(2))
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(-1)
# Pass invalid key
MESSAGE = r"Key value: .* must be string or integer"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_index(mk.MagicMock())
def test__get_name(self):
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = self.BoundingDomain(model)
index = mk.MagicMock()
name = mk.MagicMock()
model.inputs = mk.MagicMock()
model.inputs.__getitem__.return_value = name
assert bounding_box._get_name(index) == name
assert model.inputs.__getitem__.call_args_list == [mk.call(index)]
def test_ignored_inputs(self):
model = mk.MagicMock()
ignored = list(range(4, 8))
model.n_inputs = 8
model.inputs = [mk.MagicMock() for _ in range(8)]
bounding_box = self.BoundingDomain(model, ignored=ignored)
inputs = bounding_box.ignored_inputs
assert isinstance(inputs, list)
for index, _input in enumerate(inputs):
assert _input in model.inputs
assert model.inputs[index + 4] == _input
for index, _input in enumerate(model.inputs):
if _input in inputs:
assert inputs[index - 4] == _input
else:
assert index < 4
def test__validate_ignored(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass
assert bounding_box._validate_ignored(None) == []
assert bounding_box._validate_ignored(["x", "y"]) == [0, 1]
assert bounding_box._validate_ignored([0, 1]) == [0, 1]
assert bounding_box._validate_ignored([np.int32(0), np.int64(1)]) == [0, 1]
# Fail
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
bounding_box._validate_ignored([mk.MagicMock()])
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
bounding_box._validate_ignored(["z"])
MESSAGE = r"Integer key: 3 must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([3])
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([np.int32(3)])
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([np.int64(3)])
def test___call__(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
args = tuple(mk.MagicMock() for _ in range(3))
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
MESSAGE = (
r"This bounding box is fixed by the model and does not have adjustable"
r" parameters"
)
with pytest.raises(RuntimeError, match=MESSAGE):
bounding_box(*args, **kwargs)
def test_fix_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
model = mk.MagicMock()
fixed_inputs = mk.MagicMock()
with pytest.raises(
NotImplementedError, match=r"This should be implemented by a child class"
):
bounding_box.fix_inputs(model, fixed_inputs)
def test__prepare_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
with pytest.raises(
NotImplementedError,
match=r"This has not been implemented for BoundingDomain",
):
bounding_box.prepare_inputs(mk.MagicMock(), mk.MagicMock())
def test__base_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Simple shape
input_shape = (13,)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
# Complex shape
input_shape = (13, 7)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
def test__all_out_output(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
# Simple shape
model.n_outputs = 1
input_shape = (13,)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (1, 13)
assert output_unit is None
# Complex shape
model.n_outputs = 6
input_shape = (13, 7)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (6, 13, 7)
assert output_unit is None
def test__modify_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
# Simple shape
with mk.patch.object(
_BoundingDomain,
"_base_output",
autospec=True,
return_value=np.asanyarray(0),
) as mkBase:
assert (
np.array([1, 2, 3])
== bounding_box._modify_output(
[1, 2, 3], valid_index, input_shape, fill_value
)
).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
# Replacement
with mk.patch.object(
_BoundingDomain,
"_base_output",
autospec=True,
return_value=np.array([1, 2, 3, 4, 5, 6]),
) as mkBase:
assert (
np.array([7, 2, 8, 4, 9, 6])
== bounding_box._modify_output(
[7, 8, 9], np.array([[0, 2, 4]]), input_shape, fill_value
)
).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
def test__prepare_outputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
valid_outputs = [mk.MagicMock() for _ in range(3)]
effects = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(
_BoundingDomain, "_modify_output", autospec=True, side_effect=effects
) as mkModify:
assert effects == bounding_box._prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkModify.call_args_list == [
mk.call(
bounding_box,
valid_outputs[idx],
valid_index,
input_shape,
fill_value,
)
for idx in range(3)
]
def test_prepare_outputs(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
valid_outputs = mk.MagicMock()
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with mk.patch.object(
_BoundingDomain, "_prepare_outputs", autospec=True
) as mkPrepare:
# Reshape valid_outputs
model.n_outputs = 1
assert mkPrepare.return_value == bounding_box.prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box, [valid_outputs], valid_index, input_shape, fill_value
)
]
mkPrepare.reset_mock()
# No reshape valid_outputs
model.n_outputs = 2
assert mkPrepare.return_value == bounding_box.prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box, valid_outputs, valid_index, input_shape, fill_value
)
]
def test__get_valid_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Don't get unit
assert bounding_box._get_valid_outputs_unit(mk.MagicMock(), False) is None
# Get unit from unitless
assert bounding_box._get_valid_outputs_unit(7, True) is None
# Get unit
assert bounding_box._get_valid_outputs_unit(25 * u.m, True) == u.m
def test__evaluate_model(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
valid_inputs = mk.MagicMock()
input_shape = mk.MagicMock()
valid_index = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
with mk.patch.object(
_BoundingDomain, "_get_valid_outputs_unit", autospec=True
) as mkGet:
with mk.patch.object(
_BoundingDomain, "prepare_outputs", autospec=True
) as mkPrepare:
assert bounding_box._evaluate_model(
evaluate,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units,
) == (mkPrepare.return_value, mkGet.return_value)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box,
evaluate.return_value,
valid_index,
input_shape,
fill_value,
)
]
assert mkGet.call_args_list == [
mk.call(evaluate.return_value, with_units)
]
assert evaluate.call_args_list == [mk.call(valid_inputs)]
def test__evaluate(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
valid_inputs = mk.MagicMock()
valid_index = mk.MagicMock()
effects = [
(valid_inputs, valid_index, True),
(valid_inputs, valid_index, False),
]
with mk.patch.object(
self.BoundingDomain, "prepare_inputs", autospec=True, side_effect=effects
) as mkPrepare:
with mk.patch.object(
_BoundingDomain, "_all_out_output", autospec=True
) as mkAll:
with mk.patch.object(
_BoundingDomain, "_evaluate_model", autospec=True
) as mkEvaluate:
# all_out
assert (
bounding_box._evaluate(
evaluate, inputs, input_shape, fill_value, with_units
)
== mkAll.return_value
)
assert mkAll.call_args_list == [
mk.call(bounding_box, input_shape, fill_value)
]
assert mkEvaluate.call_args_list == []
assert mkPrepare.call_args_list == [
mk.call(bounding_box, input_shape, inputs)
]
mkAll.reset_mock()
mkPrepare.reset_mock()
# not all_out
assert (
bounding_box._evaluate(
evaluate, inputs, input_shape, fill_value, with_units
)
== mkEvaluate.return_value
)
assert mkAll.call_args_list == []
assert mkEvaluate.call_args_list == [
mk.call(
bounding_box,
evaluate,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units,
)
]
assert mkPrepare.call_args_list == [
mk.call(bounding_box, input_shape, inputs)
]
def test__set_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# set no unit
assert 27 == bounding_box._set_outputs_unit(27, None)
# set unit
assert 27 * u.m == bounding_box._set_outputs_unit(27, u.m)
def test_evaluate(self):
bounding_box = self.BoundingDomain(Gaussian2D())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
fill_value = mk.MagicMock()
outputs = mk.MagicMock()
valid_outputs_unit = mk.MagicMock()
value = (outputs, valid_outputs_unit)
with mk.patch.object(
_BoundingDomain, "_evaluate", autospec=True, return_value=value
) as mkEvaluate:
with mk.patch.object(
_BoundingDomain, "_set_outputs_unit", autospec=True
) as mkSet:
with mk.patch.object(Model, "input_shape", autospec=True) as mkShape:
with mk.patch.object(
Model, "bbox_with_units", new_callable=mk.PropertyMock
) as mkUnits:
assert tuple(mkSet.return_value) == bounding_box.evaluate(
evaluate, inputs, fill_value
)
assert mkSet.call_args_list == [
mk.call(outputs, valid_outputs_unit)
]
assert mkEvaluate.call_args_list == [
mk.call(
bounding_box,
evaluate,
inputs,
mkShape.return_value,
fill_value,
mkUnits.return_value,
)
]
assert mkShape.call_args_list == [
mk.call(bounding_box._model, inputs)
]
assert mkUnits.call_args_list == [mk.call()]
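# --- Illustrative sketch (not part of the original astropy test module) ---
# Test_BoundingDomain above repeatedly patches helpers with autospec=True and
# then asserts on call_args_list; for plain instance methods the recorded call
# includes the bound instance as the first argument (see the mk.call(bounding_box,
# ...) assertions). A minimal, hypothetical standalone version of that pattern:
def _autospec_delegation_sketch():
    class Target:
        def helper(self, value):
            return value * 2

        def run(self, value):
            # Delegates to helper, mirroring the delegation the tests above verify.
            return self.helper(value)

    target = Target()
    with mk.patch.object(Target, "helper", autospec=True) as mkHelper:
        # The patched method hands back the mock's return_value ...
        assert target.run(3) == mkHelper.return_value
        # ... and records the instance plus the forwarded argument.
        assert mkHelper.call_args_list == [mk.call(target, 3)]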
class TestModelBoundingBox:
def test_create(self):
intervals = ()
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "C"
# Set optional
intervals = {}
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model, order="F")
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "F"
# Set interval
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
# Set ignored
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 2
model.inputs = ["x", "y"]
bounding_box = ModelBoundingBox(intervals, model, ignored=[1])
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
assert bounding_box._ignored == [1]
intervals = ((1, 2), (3, 4))
model = mk.MagicMock()
model.n_inputs = 3
model.inputs = ["x", "y", "z"]
bounding_box = ModelBoundingBox(intervals, model, ignored=[2], order="F")
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2), 1: (3, 4)}
assert bounding_box._model == model
assert bounding_box._ignored == [2]
assert bounding_box._order == "F"
def test_copy(self):
bounding_box = ModelBoundingBox.validate(
Gaussian2D(), ((-4.5, 4.5), (-1.4, 1.4))
)
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
assert bounding_box.ignored == copy.ignored
assert id(bounding_box.ignored) != id(copy.ignored)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
# Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
# Check interval objects
for index, interval in bounding_box.intervals.items():
assert interval == copy.intervals[index]
assert id(interval) != id(copy.intervals[index])
# Same float values will have the same id
assert interval.lower == copy.intervals[index].lower
assert id(interval.lower) == id(copy.intervals[index].lower)
# Same float values will have the same id
assert interval.upper == copy.intervals[index].upper
assert id(interval.upper) == id(copy.intervals[index].upper)
assert len(bounding_box.intervals) == len(copy.intervals)
assert bounding_box.intervals.keys() == copy.intervals.keys()
def test_intervals(self):
intervals = {0: _Interval(1, 2)}
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = ModelBoundingBox(intervals, model)
assert bounding_box._intervals == intervals
assert bounding_box.intervals == intervals
def test_named_intervals(self):
intervals = {idx: _Interval(idx, idx + 1) for idx in range(4)}
model = mk.MagicMock()
model.n_inputs = 4
model.inputs = [mk.MagicMock() for _ in range(4)]
bounding_box = ModelBoundingBox(intervals, model)
named = bounding_box.named_intervals
assert isinstance(named, dict)
for name, interval in named.items():
assert name in model.inputs
assert intervals[model.inputs.index(name)] == interval
for index, name in enumerate(model.inputs):
assert index in intervals
assert name in named
assert intervals[index] == named[name]
def test___repr__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert (
bounding_box.__repr__() == "ModelBoundingBox(\n"
" intervals={\n"
" x: Interval(lower=-1, upper=1)\n"
" y: Interval(lower=-4, upper=4)\n"
" }\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
")"
)
intervals = {0: _Interval(-1, 1)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=["y"])
assert (
bounding_box.__repr__() == "ModelBoundingBox(\n"
" intervals={\n"
" x: Interval(lower=-1, upper=1)\n"
" }\n"
" ignored=['y']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
")"
)
def test___len__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert len(bounding_box) == 0 == len(bounding_box._intervals)
def test___contains__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Contains with keys
assert "x" in bounding_box
assert "y" in bounding_box
assert "z" not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
# General not in
assert mk.MagicMock() not in bounding_box
# Contains with ignored
del bounding_box["y"]
# Contains with keys
assert "x" in bounding_box
assert "y" in bounding_box
assert "z" not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
def test___getitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Get using input key
assert bounding_box["x"] == (-1, 1)
assert bounding_box["y"] == (-4, 4)
# Fail with input key
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
bounding_box["z"]
# Get using index
assert bounding_box[0] == (-1, 1)
assert bounding_box[1] == (-4, 4)
assert bounding_box[np.int32(0)] == (-1, 1)
assert bounding_box[np.int32(1)] == (-4, 4)
assert bounding_box[np.int64(0)] == (-1, 1)
assert bounding_box[np.int64(1)] == (-4, 4)
# Fail with index
MESSAGE = r"Integer key: 2 must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box[2]
with pytest.raises(IndexError, match=MESSAGE):
bounding_box[np.int32(2)]
with pytest.raises(IndexError, match=MESSAGE):
bounding_box[np.int64(2)]
# get ignored interval
del bounding_box[0]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == (-4, 4)
del bounding_box[1]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == _ignored_interval
def test_bounding_box(self):
# 0D
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=["x"])
assert bounding_box.bounding_box() == (-np.inf, np.inf)
assert bounding_box.bounding_box("C") == (-np.inf, np.inf)
assert bounding_box.bounding_box("F") == (-np.inf, np.inf)
# 1D
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == (-1, 1)
assert bounding_box.bounding_box(mk.MagicMock()) == (-1, 1)
# > 1D
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box("C") == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box("F") == ((-1, 1), (-4, 4))
def test___eq__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == bounding_box
assert bounding_box == ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == (-1, 1)
assert not (bounding_box == mk.MagicMock())
assert not (bounding_box == (-2, 2))
assert not (
bounding_box == ModelBoundingBox.validate(model, {0: _Interval(-2, 2)})
)
# Respect ordering
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box_1 = ModelBoundingBox.validate(model, intervals)
bounding_box_2 = ModelBoundingBox.validate(model, intervals, order="F")
assert bounding_box_1._order == "C"
assert bounding_box_1 == ((-4, 4), (-1, 1))
assert not (bounding_box_1 == ((-1, 1), (-4, 4)))
assert bounding_box_2._order == "F"
assert not (bounding_box_2 == ((-4, 4), (-1, 1)))
assert bounding_box_2 == ((-1, 1), (-4, 4))
assert bounding_box_1 == bounding_box_2
# Respect ignored
model = Gaussian2D()
bounding_box_1._ignored = [mk.MagicMock()]
bounding_box_2._ignored = [mk.MagicMock()]
assert bounding_box_1._ignored != bounding_box_2._ignored
assert not (bounding_box_1 == bounding_box_2)
def test__setitem__(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=[0, 1])
assert bounding_box._ignored == [0, 1]
# USING Intervals directly
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box["x"] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box["x"], _Interval)
assert bounding_box["x"] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box["y"] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box["y"], _Interval)
assert bounding_box["y"] == (-4, 4)
del bounding_box["x"]
del bounding_box["y"]
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
del bounding_box[0]
del bounding_box[1]
# USING tuples
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box["x"] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box["x"], _Interval)
assert bounding_box["x"] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box["y"] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box["y"], _Interval)
assert bounding_box["y"] == (-4, 4)
del bounding_box["x"]
del bounding_box["y"]
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# USING Intervals directly
# Set interval using key
assert "x" not in bounding_box
bounding_box["x"] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert "x" in bounding_box
assert isinstance(bounding_box["x"], _Interval)
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
# USING tuples
# Set interval using key
bounding_box._intervals = {}
assert "x" not in bounding_box
bounding_box["x"] = (np.array([-1, -2]), np.array([1, 2]))
assert "x" in bounding_box
assert isinstance(bounding_box["x"], _Interval)
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = (np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test___delitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Using index
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert 0 in bounding_box
assert "x" in bounding_box
del bounding_box[0]
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
assert 0 in bounding_box
assert "x" in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError, match=r"Cannot delete ignored input: 0!"):
del bounding_box[0]
# Using key
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert 0 in bounding_box
assert "y" in bounding_box
del bounding_box["y"]
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
assert 0 in bounding_box
assert "y" in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError, match=r"Cannot delete ignored input: y!"):
del bounding_box["y"]
def test__validate_dict(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Input name keys
intervals = {"x": _Interval(-1, 1), "y": _Interval(-4, 4)}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_dict(intervals)
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Input index
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# name keys
intervals = {"x": _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert "x" not in bounding_box
bounding_box._validate_dict(intervals)
assert "x" in bounding_box
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
# input index
bounding_box._intervals = {}
intervals = {0: _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 0 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test__validate_sequence(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Default order
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# C order
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order="C")
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Fortran order
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order="F")
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Invalid order
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order=mk.MagicMock())
assert "x" not in bounding_box
assert "y" not in bounding_box
assert len(bounding_box.intervals) == 0
def test__n_inputs(self):
model = Gaussian2D()
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box._n_inputs == 2
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=["y"])
assert bounding_box._n_inputs == 1
bounding_box = ModelBoundingBox.validate(model, {}, ignored=["x", "y"])
assert bounding_box._n_inputs == 0
bounding_box._ignored = ["x", "y", "z"]
assert bounding_box._n_inputs == 0
def test__validate_iterable(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)), order="F")
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass with ignored
bounding_box._intervals = {}
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1)}
assert 0 not in bounding_box.intervals
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
# Invalid iterable
MESSAGE = "Found {} intervals, but must have exactly {}"
bounding_box._intervals = {}
bounding_box._ignored = []
assert "x" not in bounding_box
assert "y" not in bounding_box
with pytest.raises(ValueError, match=MESSAGE.format(3, 2)):
bounding_box._validate_iterable(((-4, 4), (-1, 1), (-3, 3)))
assert len(bounding_box.intervals) == 0
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
with pytest.raises(ValueError, match=MESSAGE.format(2, 1)):
bounding_box._validate_iterable(intervals)
assert len(bounding_box.intervals) == 0
bounding_box._ignored = []
intervals = {0: _Interval(-1, 1)}
with pytest.raises(ValueError, match=MESSAGE.format(1, 2)):
bounding_box._validate_iterable(intervals)
assert "x" not in bounding_box
assert "y" not in bounding_box
assert len(bounding_box.intervals) == 0
def test__validate(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)), order="F")
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert "x" not in bounding_box
assert "y" not in bounding_box
bounding_box._validate(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass single with ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox({}, model, ignored=[1])
assert 0 not in bounding_box.intervals
assert 1 not in bounding_box.intervals
bounding_box._validate(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert len(bounding_box.intervals) == 1
# Pass single
model = Gaussian1D()
bounding_box = ModelBoundingBox({}, model)
assert "x" not in bounding_box
bounding_box._validate((-1, 1))
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
sequence = (np.array([-1, -2]), np.array([1, 2]))
assert "x" not in bounding_box
bounding_box._validate(sequence)
assert "x" in bounding_box
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
def test_validate(self):
model = Gaussian2D()
kwargs = {"test": mk.MagicMock()}
# Pass sequence Default order
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box = ModelBoundingBox.validate(
model, ((-4, 4), (-1, 1)), order="F", **kwargs
)
assert (bounding_box._model.parameters == model.parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-4, 4)
assert "y" in bounding_box
assert bounding_box["y"] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals, order="F", **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == "F"
# Pass ModelBoundingBox
bbox = bounding_box
bounding_box = ModelBoundingBox.validate(model, bbox, **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == "F"
# Pass single ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(
model, intervals, ignored=["y"], **kwargs
)
assert (bounding_box._model.parameters == model.parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert "y" in bounding_box
assert bounding_box["y"] == _ignored_interval
assert len(bounding_box.intervals) == 1
# Pass single
bounding_box = ModelBoundingBox.validate(Gaussian1D(), (-1, 1), **kwargs)
assert (bounding_box._model.parameters == Gaussian1D().parameters).all()
assert "x" in bounding_box
assert bounding_box["x"] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
sequence = (np.array([-1, -2]), np.array([1, 2]))
bounding_box = ModelBoundingBox.validate(model, sequence, **kwargs)
assert "x" in bounding_box
assert (bounding_box["x"].lower == np.array([-1, -2])).all()
assert (bounding_box["x"].upper == np.array([1, 2])).all()
def test_fix_inputs(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))
        # _keep_ignored = False (default)
new_bounding_box = bounding_box.fix_inputs(Gaussian1D(), {1: mk.MagicMock()})
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian1D().parameters).all()
assert "x" in new_bounding_box
assert new_bounding_box["x"] == (-1, 1)
assert "y" not in new_bounding_box
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == []
        # _keep_ignored = True
new_bounding_box = bounding_box.fix_inputs(
Gaussian2D(), {1: mk.MagicMock()}, _keep_ignored=True
)
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian2D().parameters).all()
assert "x" in new_bounding_box
assert new_bounding_box["x"] == (-1, 1)
assert "y" in new_bounding_box
assert "y" in new_bounding_box.ignored_inputs
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == [1]
def test_dimension(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert bounding_box.dimension == 0 == len(bounding_box._intervals)
def test_domain(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# test defaults
assert (
np.array(bounding_box.domain(0.25))
== np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])
).all()
# test C order
assert (
np.array(bounding_box.domain(0.25, "C"))
== np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])
).all()
# test Fortran order
assert (
np.array(bounding_box.domain(0.25, "F"))
== np.array([np.linspace(-1, 1, 9), np.linspace(0, 2, 9)])
).all()
# test error order
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box.domain(0.25, mk.MagicMock())
def test__outside(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
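        # _outside returns a boolean mask flagging positions that fall outside the
        # intervals, plus a flag that is True only when every input is outside.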
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False for _ in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
# fmt: off
assert (
outside_index
== [
True, True, True, True,
False, False, False, False, False,
True, True, True, True
]
).all()
# fmt: on
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True for _ in range(13)]).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True]).all()
assert all_out and isinstance(all_out, bool)
def test__valid_index(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [idx for idx in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
def test_prepare_inputs(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert (np.array(new_inputs) == np.array(inputs)).all()
assert len(valid_index) == 1
assert (valid_index[0] == [idx for idx in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert (
np.array(new_inputs)
== np.array(
[
[x[4], x[5], x[6], x[7], x[8]],
[y[4], y[5], y[6], y[7], y[8]],
]
)
).all()
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert (np.array(new_inputs) == np.array([[0.5], [0.5]])).all()
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(
input_shape, inputs
)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
def test_bounding_box_ignore(self):
"""Regression test for #13028"""
bbox_x = ModelBoundingBox((9, 10), Polynomial2D(1), ignored=["x"])
assert bbox_x.ignored_inputs == ["x"]
bbox_y = ModelBoundingBox((11, 12), Polynomial2D(1), ignored=["y"])
assert bbox_y.ignored_inputs == ["y"]
class Test_SelectorArgument:
def test_create(self):
index = mk.MagicMock()
ignore = mk.MagicMock()
argument = _SelectorArgument(index, ignore)
assert isinstance(argument, _BaseSelectorArgument)
assert argument.index == index
assert argument.ignore == ignore
assert argument == (index, ignore)
def test_validate(self):
model = Gaussian2D()
# default integer
assert _SelectorArgument.validate(model, 0) == (0, True)
assert _SelectorArgument.validate(model, 1) == (1, True)
# default string
assert _SelectorArgument.validate(model, "x") == (0, True)
assert _SelectorArgument.validate(model, "y") == (1, True)
ignore = mk.MagicMock()
# non-default integer
assert _SelectorArgument.validate(model, 0, ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 1, ignore) == (1, ignore)
# non-default string
assert _SelectorArgument.validate(model, "x", ignore) == (0, ignore)
assert _SelectorArgument.validate(model, "y", ignore) == (1, ignore)
# Fail
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
_SelectorArgument.validate(model, "z")
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer."
):
_SelectorArgument.validate(model, mk.MagicMock())
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
_SelectorArgument.validate(model, 2)
def test_get_selector(self):
# single inputs
inputs = [idx + 17 for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
# numpy array of single inputs
inputs = [np.array([idx + 11]) for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
inputs = [np.asanyarray(idx + 13) for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
# multi entry numpy array
inputs = [np.array([idx + 27, idx - 31]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(
*inputs
) == tuple(inputs[index])
def test_name(self):
model = Gaussian2D()
for index in range(model.n_inputs):
assert (
_SelectorArgument(index, mk.MagicMock()).name(model)
== model.inputs[index]
)
def test_pretty_repr(self):
model = Gaussian2D()
assert (
_SelectorArgument(0, False).pretty_repr(model)
== "Argument(name='x', ignore=False)"
)
assert (
_SelectorArgument(0, True).pretty_repr(model)
== "Argument(name='x', ignore=True)"
)
assert (
_SelectorArgument(1, False).pretty_repr(model)
== "Argument(name='y', ignore=False)"
)
assert (
_SelectorArgument(1, True).pretty_repr(model)
== "Argument(name='y', ignore=True)"
)
def test_get_fixed_value(self):
model = Gaussian2D()
values = {0: 5, "y": 7}
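        # The fixed-value dict may be keyed by input index or by input name.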
# Get index value
assert _SelectorArgument(0, mk.MagicMock()).get_fixed_value(model, values) == 5
# Get name value
assert _SelectorArgument(1, mk.MagicMock()).get_fixed_value(model, values) == 7
# Fail
MESSAGE = r".* was not found in .*"
with pytest.raises(RuntimeError, match=MESSAGE) as err:
_SelectorArgument(1, True).get_fixed_value(model, {0: 5})
def test_is_argument(self):
model = Gaussian2D()
argument = _SelectorArgument.validate(model, 0)
# Is true
assert argument.is_argument(model, 0) is True
assert argument.is_argument(model, "x") is True
# Is false
assert argument.is_argument(model, 1) is False
assert argument.is_argument(model, "y") is False
# Fail
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
argument.is_argument(model, "z")
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
argument.is_argument(model, mk.MagicMock())
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
argument.is_argument(model, 2)
def test_named_tuple(self):
model = Gaussian2D()
for index in range(model.n_inputs):
ignore = mk.MagicMock()
assert _SelectorArgument(index, ignore).named_tuple(model) == (
model.inputs[index],
ignore,
)
class Test_SelectorArguments:
def test_create(self):
arguments = _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False))
)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == []
kept_ignore = mk.MagicMock()
arguments = _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False)), kept_ignore
)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == kept_ignore
def test_pretty_repr(self):
model = Gaussian2D()
arguments = _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False))
)
assert (
arguments.pretty_repr(model) == "SelectorArguments(\n"
" Argument(name='x', ignore=True)\n"
" Argument(name='y', ignore=False)\n"
")"
)
def test_ignore(self):
assert _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, True))
).ignore == [0, 1]
assert _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, True)), [13, 4]
).ignore == [0, 1, 13, 4]
assert _SelectorArguments(
(_SelectorArgument(0, True), _SelectorArgument(1, False))
).ignore == [0]
assert _SelectorArguments(
(_SelectorArgument(0, False), _SelectorArgument(1, True))
).ignore == [1]
assert (
_SelectorArguments(
(_SelectorArgument(0, False), _SelectorArgument(1, False))
).ignore
== []
)
assert _SelectorArguments(
(_SelectorArgument(0, False), _SelectorArgument(1, False)), [17, 14]
).ignore == [17, 14]
def test_validate(self):
# Integer key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0, True), (1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Default ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0,), (1,)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, True))
assert arguments.kept_ignore == []
# String key and passed ignore
arguments = _SelectorArguments.validate(
Gaussian2D(), (("x", True), ("y", False))
)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Test kept_ignore option
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments, [11, 5, 8])
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [11, 5, 8]
arguments._kept_ignore = [13, 17, 14]
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [13, 17, 14]
# Invalid, bad argument
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
_SelectorArguments.validate(Gaussian2D(), ((0, True), ("z", False)))
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
_SelectorArguments.validate(
Gaussian2D(), ((mk.MagicMock(), True), (1, False))
)
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
_SelectorArguments.validate(Gaussian2D(), ((0, True), (2, False)))
# Invalid, repeated argument
with pytest.raises(ValueError, match=r"Input: 'x' has been repeated"):
_SelectorArguments.validate(Gaussian2D(), ((0, True), (0, False)))
# Invalid, no arguments
with pytest.raises(
ValueError, match=r"There must be at least one selector argument"
):
_SelectorArguments.validate(Gaussian2D(), ())
def test_get_selector(self):
inputs = [idx + 19 for idx in range(4)]
assert _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).get_selector(*inputs) == tuple(inputs[:2])
assert _SelectorArguments.validate(
Gaussian2D(), ((1, True), (0, False))
).get_selector(*inputs) == tuple(inputs[:2][::-1])
assert _SelectorArguments.validate(Gaussian2D(), ((1, False),)).get_selector(
*inputs
) == (inputs[1],)
assert _SelectorArguments.validate(Gaussian2D(), ((0, True),)).get_selector(
*inputs
) == (inputs[0],)
def test_is_selector(self):
# Is Selector
assert _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector((0.5, 2.5))
assert _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector(
(0.5,)
)
# Is not selector
assert not _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector((0.5, 2.5, 3.5))
assert not _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector((0.5,))
assert not _SelectorArguments.validate(
Gaussian2D(), ((0, True), (1, False))
).is_selector(0.5)
assert not _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector(
(0.5, 2.5)
)
assert not _SelectorArguments.validate(Gaussian2D(), ((0, True),)).is_selector(
2.5
)
def test_get_fixed_values(self):
model = Gaussian2D()
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {0: 11, 1: 7}) == (11, 7)
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {0: 5, "y": 47}) == (5, 47)
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {"x": 2, "y": 9}) == (2, 9)
assert _SelectorArguments.validate(
model, ((0, True), (1, False))
).get_fixed_values(model, {"x": 12, 1: 19}) == (12, 19)
def test_is_argument(self):
model = Gaussian2D()
# Is true
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.is_argument(model, 0) is True
assert arguments.is_argument(model, "x") is True
assert arguments.is_argument(model, 1) is True
assert arguments.is_argument(model, "y") is True
# Is true and false
arguments = _SelectorArguments.validate(model, ((0, True),))
assert arguments.is_argument(model, 0) is True
assert arguments.is_argument(model, "x") is True
assert arguments.is_argument(model, 1) is False
assert arguments.is_argument(model, "y") is False
arguments = _SelectorArguments.validate(model, ((1, False),))
assert arguments.is_argument(model, 0) is False
assert arguments.is_argument(model, "x") is False
assert arguments.is_argument(model, 1) is True
assert arguments.is_argument(model, "y") is True
def test_selector_index(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.selector_index(model, 0) == 0
assert arguments.selector_index(model, "x") == 0
assert arguments.selector_index(model, 1) == 1
assert arguments.selector_index(model, "y") == 1
arguments = _SelectorArguments.validate(model, ((1, True), (0, False)))
assert arguments.selector_index(model, 0) == 1
assert arguments.selector_index(model, "x") == 1
assert arguments.selector_index(model, 1) == 0
assert arguments.selector_index(model, "y") == 0
# Error
arguments = _SelectorArguments.validate(model, ((0, True),))
with pytest.raises(
ValueError, match=r"y does not correspond to any selector argument"
):
arguments.selector_index(model, "y")
def test_add_ignore(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True),))
assert arguments == ((0, True),)
assert arguments._kept_ignore == []
new_arguments0 = arguments.add_ignore(model, 1)
assert new_arguments0 == arguments
assert new_arguments0._kept_ignore == [1]
        assert arguments._kept_ignore == []
new_arguments1 = new_arguments0.add_ignore(model, "y")
assert new_arguments1 == arguments == new_arguments0
assert new_arguments0._kept_ignore == [1]
assert new_arguments1._kept_ignore == [1, 1]
assert arguments._kept_ignore == []
# Error
with pytest.raises(
ValueError, match=r"0: is a selector argument and cannot be ignored"
):
arguments.add_ignore(model, 0)
def test_reduce(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
new_arguments = arguments.reduce(model, 0)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, "x")
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 1)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, "y")
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
def test_named_tuple(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.named_tuple(model) == (("x", True), ("y", False))
class TestCompoundBoundingBox:
def test_create(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox(
bounding_boxes, model, selector_args, create_selector, order="F"
)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == "F"
def test_copy(self):
bounding_box = CompoundBoundingBox.validate(
Gaussian2D(),
{(1,): (-1.5, 1.3), (2,): (-2.7, 2.4)},
((0, True),),
mk.MagicMock(),
)
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
        # Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
assert bounding_box._create_selector == copy._create_selector
assert id(bounding_box._create_selector) != id(copy._create_selector)
# Check selector_args
for index, argument in enumerate(bounding_box.selector_args):
assert argument == copy.selector_args[index]
assert id(argument) != id(copy.selector_args[index])
            # Same integer values will have the same id
assert argument.index == copy.selector_args[index].index
assert id(argument.index) == id(copy.selector_args[index].index)
            # Same boolean values will have the same id
assert argument.ignore == copy.selector_args[index].ignore
assert id(argument.ignore) == id(copy.selector_args[index].ignore)
assert len(bounding_box.selector_args) == len(copy.selector_args)
# Check bounding_boxes
for selector, bbox in bounding_box.bounding_boxes.items():
assert bbox == copy.bounding_boxes[selector]
assert id(bbox) != id(copy.bounding_boxes[selector])
assert bbox.ignored == copy.bounding_boxes[selector].ignored
assert id(bbox.ignored) != id(copy.bounding_boxes[selector].ignored)
# model is not copied to prevent infinite recursion
assert bbox._model == copy.bounding_boxes[selector]._model
assert id(bbox._model) == id(copy.bounding_boxes[selector]._model)
            # Same string values will have the same id
assert bbox._order == copy.bounding_boxes[selector]._order
assert id(bbox._order) == id(copy.bounding_boxes[selector]._order)
# Check interval objects
for index, interval in bbox.intervals.items():
assert interval == copy.bounding_boxes[selector].intervals[index]
assert id(interval) != id(
copy.bounding_boxes[selector].intervals[index]
)
                # Same float values will have the same id
assert (
interval.lower
== copy.bounding_boxes[selector].intervals[index].lower
)
assert id(interval.lower) == id(
copy.bounding_boxes[selector].intervals[index].lower
)
                # Same float values will have the same id
assert (
interval.upper
== copy.bounding_boxes[selector].intervals[index].upper
)
assert id(interval.upper) == id(
copy.bounding_boxes[selector].intervals[index].upper
)
assert len(bbox.intervals) == len(copy.bounding_boxes[selector].intervals)
assert (
bbox.intervals.keys() == copy.bounding_boxes[selector].intervals.keys()
)
assert len(bounding_box.bounding_boxes) == len(copy.bounding_boxes)
assert bounding_box.bounding_boxes.keys() == copy.bounding_boxes.keys()
def test___repr__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert (
bounding_box.__repr__() == "CompoundBoundingBox(\n"
" bounding_boxes={\n"
" (1,) = ModelBoundingBox(\n"
" intervals={\n"
" y: Interval(lower=-1, upper=1)\n"
" }\n"
" ignored=['x']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
" )\n"
" (2,) = ModelBoundingBox(\n"
" intervals={\n"
" y: Interval(lower=-2, upper=2)\n"
" }\n"
" ignored=['x']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
" )\n"
" }\n"
" selector_args = SelectorArguments(\n"
" Argument(name='x', ignore=True)\n"
" )\n"
")"
)
def test_bounding_boxes(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box.bounding_boxes == bounding_boxes
def test_selector_args(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_box = CompoundBoundingBox({}, model, selector_args)
# Get
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
# Set
selector_args = ((1, False),)
with pytest.warns(RuntimeWarning, match=r"Overriding selector_args.*"):
bounding_box.selector_args = selector_args
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
def test_create_selector(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1,),), create_selector)
assert bounding_box._create_selector == create_selector
assert bounding_box.create_selector == create_selector
def test__get_selector_key(self):
bounding_box = CompoundBoundingBox({}, Gaussian2D(), ((1, True),))
assert len(bounding_box.bounding_boxes) == 0
# Singular
assert bounding_box._get_selector_key(5) == (5,)
assert bounding_box._get_selector_key((5,)) == (5,)
assert bounding_box._get_selector_key([5]) == (5,)
assert bounding_box._get_selector_key(np.asanyarray(5)) == (5,)
assert bounding_box._get_selector_key(np.array([5])) == (5,)
# multiple
assert bounding_box._get_selector_key((5, 19)) == (5, 19)
assert bounding_box._get_selector_key([5, 19]) == (5, 19)
assert bounding_box._get_selector_key(np.array([5, 19])) == (5, 19)
def test___setitem__(self):
model = Gaussian2D()
# Ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, True),), order="F")
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15,)] = (-15, 15)
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == (-15, 15)
assert bounding_box._bounding_boxes[(15,)].order == "F"
# Invalid key
assert (7, 13) not in bounding_box._bounding_boxes
with pytest.raises(ValueError, match=".* is not a selector!"):
bounding_box[(7, 13)] = (-7, 7)
assert (7, 13) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
assert 13 not in bounding_box._bounding_boxes
with pytest.raises(
ValueError, match="An interval must be some sort of sequence of length 2"
):
bounding_box[(13,)] = ((-13, 13), (-3, 3))
assert 13 not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# No ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, False),), order="F")
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15,)] = ((-15, 15), (-6, 6))
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == ((-15, 15), (-6, 6))
assert bounding_box._bounding_boxes[(15,)].order == "F"
# Invalid key
assert (14, 11) not in bounding_box._bounding_boxes
with pytest.raises(ValueError, match=".* is not a selector!"):
bounding_box[(14, 11)] = ((-7, 7), (-12, 12))
assert (14, 11) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
assert 13 not in bounding_box._bounding_boxes
with pytest.raises(
ValueError, match="An interval must be some sort of sequence of length 2"
):
bounding_box[(13,)] = (-13, 13)
assert 13 not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
def test__validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
# Tuple selector_args
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox({}, model, selector_args)
bounding_box._validate(bounding_boxes)
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
def test___eq__(self):
bounding_box_1 = CompoundBoundingBox(
{(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),)
)
bounding_box_2 = CompoundBoundingBox(
{(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),)
)
# Equal
assert bounding_box_1 == bounding_box_2
# Not equal to non-compound bounding_box
assert not bounding_box_1 == mk.MagicMock()
assert not bounding_box_2 == mk.MagicMock()
# Not equal bounding_boxes
bounding_box_2[(15,)] = (-15, 15)
assert not bounding_box_1 == bounding_box_2
del bounding_box_2._bounding_boxes[(15,)]
assert bounding_box_1 == bounding_box_2
# Not equal selector_args
bounding_box_2._selector_args = _SelectorArguments.validate(
Gaussian2D(), ((0, False),)
)
assert not bounding_box_1 == bounding_box_2
bounding_box_2._selector_args = _SelectorArguments.validate(
Gaussian2D(), ((0, True),)
)
assert bounding_box_1 == bounding_box_2
# Not equal create_selector
bounding_box_2._create_selector = mk.MagicMock()
assert not bounding_box_1 == bounding_box_2
def test_validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
# Fail selector_args
MESSAGE = r"Selector arguments must be provided .*"
with pytest.raises(ValueError, match=MESSAGE):
CompoundBoundingBox.validate(model, bounding_boxes)
# Normal validate
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args, create_selector, order="F"
)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == "F"
# Re-validate
new_bounding_box = CompoundBoundingBox.validate(model, bounding_box)
assert bounding_box == new_bounding_box
assert new_bounding_box._order == "F"
# Default order
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args, create_selector
)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == "C"
def test___contains__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert (1,) in bounding_box
assert (2,) in bounding_box
assert (3,) not in bounding_box
assert 1 not in bounding_box
assert 2 not in bounding_box
def test__create_bounding_box(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1, False),), create_selector)
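        # The mocked create_selector supplies the intervals for an unseen selector
        # key; the resulting ModelBoundingBox should then be cached under that key.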
# Create is successful
create_selector.return_value = ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 0
bbox = bounding_box._create_bounding_box((7,))
assert isinstance(bbox, ModelBoundingBox)
assert bbox == ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 1
assert (7,) in bounding_box
assert isinstance(bounding_box[(7,)], ModelBoundingBox)
assert bounding_box[(7,)] == bbox
# Create is unsuccessful
create_selector.return_value = (-42, 42)
with pytest.raises(
ValueError, match="An interval must be some sort of sequence of length 2"
):
bounding_box._create_bounding_box((27,))
def test___getitem__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
# already exists
assert isinstance(bounding_box[1], ModelBoundingBox)
assert bounding_box[1] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[2] == (-2, 2)
assert isinstance(bounding_box[(1,)], ModelBoundingBox)
assert bounding_box[(1,)] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[(2,)] == (-2, 2)
# no selector
with pytest.raises(
RuntimeError, match="No bounding box is defined for selector: .*"
):
bounding_box[(3,)]
# Create a selector
bounding_box._create_selector = mk.MagicMock()
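        # Patch _create_bounding_box to check that __getitem__ delegates unknown
        # selector keys to it (rather than raising) once a create_selector is set.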
with mk.patch.object(
CompoundBoundingBox, "_create_bounding_box", autospec=True
) as mkCreate:
assert bounding_box[(3,)] == mkCreate.return_value
assert mkCreate.call_args_list == [mk.call(bounding_box, (3,))]
def test__select_bounding_box(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
inputs = [mk.MagicMock() for _ in range(3)]
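        # Mock out get_selector and __getitem__ to verify that _select_bounding_box
        # builds the selector key from the inputs and looks up the matching box.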
with mk.patch.object(
_SelectorArguments, "get_selector", autospec=True
) as mkSelector:
with mk.patch.object(
CompoundBoundingBox, "__getitem__", autospec=True
) as mkGet:
assert bounding_box._select_bounding_box(inputs) == mkGet.return_value
assert mkGet.call_args_list == [
mk.call(bounding_box, mkSelector.return_value)
]
assert mkSelector.call_args_list == [
mk.call(bounding_box.selector_args, *inputs)
]
def test_prepare_inputs(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
input_shape = mk.MagicMock()
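        # Patch ModelBoundingBox.prepare_inputs to confirm the compound box
        # dispatches to the sub-box selected by the first input value.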
with mk.patch.object(
ModelBoundingBox, "prepare_inputs", autospec=True
) as mkPrepare:
assert (
bounding_box.prepare_inputs(input_shape, [1, 2, 3])
== mkPrepare.return_value
)
assert mkPrepare.call_args_list == [
mk.call(bounding_box[(1,)], input_shape, [1, 2, 3])
]
mkPrepare.reset_mock()
assert (
bounding_box.prepare_inputs(input_shape, [2, 2, 3])
== mkPrepare.return_value
)
assert mkPrepare.call_args_list == [
mk.call(bounding_box[(2,)], input_shape, [2, 2, 3])
]
mkPrepare.reset_mock()
def test__matching_bounding_boxes(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {
(1,): ((-1, 1), (-2, 2)),
(2,): ((-2, 2), (-3, 3)),
(3,): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
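        # With a single selector argument, matching on it leaves boxes keyed by the
        # empty tuple, with the selector input ignored in each remaining box.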
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes("x", value)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox
assert "x" in bbox.ignored_inputs
assert "y" in bbox
assert bbox["y"] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {
(1, 3): ((-1, 1), (-2, 2)),
(2, 2): ((-2, 2), (-3, 3)),
(3, 1): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes("x", value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox
assert "x" in bbox.ignored_inputs
assert "y" in bbox
assert bbox["y"] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
matching = bounding_box._matching_bounding_boxes("y", value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "y" in bbox
assert "y" in bbox.ignored_inputs
assert "x" in bbox
assert bbox["x"] == (-(5 - value), (5 - value))
assert len(bbox.intervals) == 1
assert bbox.ignored == [1]
# Real fix input of slicing input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
matching = bounding_box._matching_bounding_boxes("slit_id", 0)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)}
assert bbox.order == "F"
matching = bounding_box._matching_bounding_boxes("slit_id", 1)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 3047.5), "y": (-0.5, 4047.5)}
assert bbox.order == "F"
# Errors
MESSAGE = (
r"Attempting to fix input .*, but there are no bounding boxes for argument"
r" value .*"
)
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._matching_bounding_boxes("slit_id", 2)
def test__fix_input_selector_arg(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {
(1,): ((-1, 1), (-2, 2)),
(2,): ((-2, 2), (-3, 3)),
(3,): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg("x", value)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox
assert "x" in bbox.ignored_inputs
assert "y" in bbox
assert bbox["y"] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {
(1, 3): ((-1, 1), (-2, 2)),
(2, 2): ((-2, 2), (-3, 3)),
(3, 1): ((-3, 3), (-4, 4)),
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg("x", value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((1, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert "x" in bbox_selector
assert "x" in bbox_selector.ignored_inputs
assert "y" in bbox_selector
assert bbox_selector["y"] == (-value, value)
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [0]
bbox = bounding_box._fix_input_selector_arg("y", value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((0, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert "y" in bbox_selector
assert "y" in bbox_selector.ignored_inputs
assert "x" in bbox_selector
assert bbox_selector["x"] == (-(5 - value), (5 - value))
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [1]
# Real fix input of slicing input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
bbox = bounding_box._fix_input_selector_arg("slit_id", 0)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)}
assert bbox.order == "F"
bbox = bounding_box._fix_input_selector_arg("slit_id", 1)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ["slit_id"]
assert bbox.named_intervals == {"x": (-0.5, 3047.5), "y": (-0.5, 4047.5)}
assert bbox.order == "F"
def test__fix_input_bbox_arg(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
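        # Fixing a non-selector input drops that interval from every sub-box and
        # records the fixed input's index in selector_args._kept_ignore.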
bbox = bounding_box._fix_input_bbox_arg("x", 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [0]
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert len(bbox._bounding_boxes) == 2
bbox = bounding_box._fix_input_bbox_arg("y", 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [1]
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert len(bbox._bounding_boxes) == 2
def test_fix_inputs(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ("x", "y", "slit_id")
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
bounding_box = CompoundBoundingBox.validate(
model, bounding_boxes, selector_args=[("slit_id", True)], order="F"
)
model.bounding_box = bounding_box
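        # fix_inputs on the selector argument collapses the compound box to a plain
        # ModelBoundingBox, while fixing a regular input keeps it compound.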
# Fix selector argument
new_model = fix_inputs(model, {"slit_id": 0})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {"x": (-0.5, 1047.5), "y": (-0.5, 2047.5)}
assert bbox.order == "F"
# Fix a bounding_box field
new_model = fix_inputs(model, {"x": 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(0,)].order == "F"
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert bbox._bounding_boxes[(1,)].order == "F"
assert len(bbox._bounding_boxes) == 2
new_model = fix_inputs(model, {"y": 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(0,)].order == "F"
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert bbox._bounding_boxes[(1,)].order == "F"
assert len(bbox._bounding_boxes) == 2
# Fix selector argument and a bounding_box field
new_model = fix_inputs(model, {"slit_id": 0, "x": 5})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {"y": (-0.5, 2047.5)}
assert bbox.order == "F"
new_model = fix_inputs(model, {"y": 5, "slit_id": 1})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {"x": (-0.5, 3047.5)}
assert bbox.order == "F"
# Fix two bounding_box fields
new_model = fix_inputs(model, {"x": 5, "y": 7})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert bbox.selector_args == ((0, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(0,)].order == "F"
assert bbox._bounding_boxes[(1,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(1,)].order == "F"
assert len(bbox._bounding_boxes) == 2
def test_complex_compound_bounding_box(self):
model = Identity(4)
bounding_boxes = {
(2.5, 1.3): ((-1, 1), (-3, 3)),
(2.5, 2.71): ((-3, 3), (-1, 1)),
}
selector_args = (("x0", True), ("x1", True))
bbox = CompoundBoundingBox.validate(model, bounding_boxes, selector_args)
assert bbox[(2.5, 1.3)] == ModelBoundingBox(
((-1, 1), (-3, 3)), model, ignored=["x0", "x1"]
)
assert bbox[(2.5, 2.71)] == ModelBoundingBox(
((-3, 3), (-1, 1)), model, ignored=["x0", "x1"]
)
da8bd24a632c88b69c9cf0c6dfdfa4be68b50b113fe1af81ffa9d60256158292
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import os
import subprocess
import sys
import unittest.mock as mk
from inspect import signature
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy
import astropy.modeling.core as core
import astropy.units as u
from astropy.convolution import convolve_models
from astropy.modeling import models
from astropy.modeling.bounding_box import CompoundBoundingBox, ModelBoundingBox
from astropy.modeling.core import (
SPECIAL_OPERATORS,
CompoundModel,
Model,
_add_special_operator,
bind_bounding_box,
bind_compound_bounding_box,
custom_model,
fix_inputs,
)
from astropy.modeling.parameters import Parameter
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
class NonFittableModel(Model):
"""An example class directly subclassing Model for testing."""
a = Parameter()
def __init__(self, a, model_set_axis=None):
super().__init__(a, model_set_axis=model_set_axis)
@staticmethod
def evaluate():
pass
def test_Model_instance_repr_and_str():
m = NonFittableModel(42.5)
assert repr(m) == "<NonFittableModel(a=42.5)>"
assert (
str(m) == "Model: NonFittableModel\n"
"Inputs: ()\n"
"Outputs: ()\n"
"Model set size: 1\n"
"Parameters:\n"
" a \n"
" ----\n"
" 42.5"
)
assert len(m) == 1
def test_Model_array_parameter():
model = models.Gaussian1D(4, 2, 1)
assert_allclose(model.param_sets, [[4], [2], [1]])
def test_inputless_model():
"""
Regression test for
https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
"""
class TestModel(Model):
n_outputs = 1
a = Parameter()
@staticmethod
def evaluate(a):
return a
m = TestModel(1)
assert m.a == 1
assert m() == 1
# Test array-like output
m = TestModel([1, 2, 3], model_set_axis=False)
assert len(m) == 1
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[1, 2, 3], model_set_axis=0)
assert len(m) == 3
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
    # Test a model set with a NumPy integer model_set_axis
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=np.int64(0))
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
def test_ParametricModel():
MESSAGE = r"Gaussian1D.__init__.* got an unrecognized parameter 'wrong'"
with pytest.raises(TypeError, match=MESSAGE):
models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature():
"""
Tests that the signatures for the __init__ and __call__
methods of custom models are useful.
"""
@custom_model
def model_a(x):
return x
assert model_a.param_names == ()
assert model_a.n_inputs == 1
sig = signature(model_a.__init__)
assert list(sig.parameters.keys()) == ["self", "args", "meta", "name", "kwargs"]
sig = signature(model_a.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
@custom_model
def model_b(x, a=1, b=2):
return x + a + b
assert model_b.param_names == ("a", "b")
assert model_b.n_inputs == 1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"]
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
@custom_model
def model_c(x, y, a=1, b=2):
return x + y + a + b
assert model_c.param_names == ("a", "b")
assert model_c.n_inputs == 2
sig = signature(model_c.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"]
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_c.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
def test_custom_model_subclass():
"""Test that custom models can be subclassed."""
@custom_model
def model_a(x, a=1):
return x * a
class model_b(model_a):
# Override the evaluate from model_a
@classmethod
def evaluate(cls, x, a):
return -super().evaluate(x, a)
b = model_b()
assert b.param_names == ("a",)
assert b.a == 1
assert b(1) == -1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "kwargs"]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
def test_custom_model_parametrized_decorator():
"""Tests using custom_model as a decorator with parameters."""
def cosine(x, amplitude=1):
return [amplitude * np.cos(x)]
@custom_model(fit_deriv=cosine)
def sine(x, amplitude=1):
return amplitude * np.sin(x)
assert issubclass(sine, Model)
s = sine(2)
assert_allclose(s(np.pi / 2), 2)
assert_allclose(s.fit_deriv(0, 2), 2)
def test_custom_model_n_outputs():
"""
Test creating a custom_model which has more than one output, which
requires special handling.
    Demonstrates that issue #11791's ``n_outputs`` error has been solved.
"""
@custom_model
def model(x, y, n_outputs=2):
return x + 1, y + 1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 2
assert m.outputs == ("x0", "x1")
assert (
separability_matrix(m)
== [
[True, True],
[True, True],
]
).all()
@custom_model
def model(x, y, z, n_outputs=3):
return x + 1, y + 1, z + 1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 3
assert m.outputs == ("x0", "x1", "x2")
assert (
separability_matrix(m)
== [
[True, True, True],
[True, True, True],
[True, True, True],
]
).all()
def test_custom_model_settable_parameters():
"""
Test creating a custom_model which specifically sets adjustable model
parameters.
Demonstrates part of issue #11791's notes about what passed parameters
should/shouldn't be allowed. In this case, settable parameters
should be allowed to have defaults set.
"""
@custom_model
def model(x, y, n_outputs=2, bounding_box=((1, 2), (3, 4))):
return x + 1, y + 1
m = model()
assert m.n_outputs == 2
assert m.bounding_box == ((1, 2), (3, 4))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
m = model(bounding_box=((5, 6), (7, 8)))
assert m.n_outputs == 2
assert m.bounding_box == ((5, 6), (7, 8))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
@custom_model
def model(x, y, n_outputs=2, outputs=("z0", "z1")):
return x + 1, y + 1
m = model()
assert m.n_outputs == 2
assert m.outputs == ("z0", "z1")
m.outputs = ("a0", "a1")
assert m.outputs == ("a0", "a1")
m = model(outputs=("w0", "w1"))
assert m.n_outputs == 2
assert m.outputs == ("w0", "w1")
m.outputs = ("a0", "a1")
assert m.outputs == ("a0", "a1")
def test_custom_model_rejected_parameters():
"""
Test creating a custom_model which attempts to override non-overridable
parameters.
Demonstrates part of issue #11791's notes about what passed parameters
should/shouldn't be allowed. In this case, non-settable parameters
    should raise an error (otherwise unexpected behavior may occur).
"""
with pytest.raises(
ValueError, match=r"Parameter 'n_inputs' cannot be a model property: *"
):
@custom_model
def model1(x, y, n_outputs=2, n_inputs=3):
return x + 1, y + 1
with pytest.raises(
ValueError, match=r"Parameter 'uses_quantity' cannot be a model property: *"
):
@custom_model
def model2(x, y, n_outputs=2, uses_quantity=True):
return x + 1, y + 1
def test_custom_inverse():
"""Test setting a custom inverse on a model."""
p = models.Polynomial1D(1, c0=-2, c1=3)
# A trivial inverse for a trivial polynomial
inv = models.Polynomial1D(1, c0=(2.0 / 3.0), c1=(1.0 / 3.0))
MESSAGE = (
r"No analytical or user-supplied inverse transform has been implemented for"
r" this model"
)
with pytest.raises(NotImplementedError, match=MESSAGE):
p.inverse
p.inverse = inv
x = np.arange(100)
assert_allclose(x, p(p.inverse(x)))
assert_allclose(x, p.inverse(p(x)))
p.inverse = None
with pytest.raises(NotImplementedError, match=MESSAGE):
p.inverse
def test_custom_inverse_reset():
"""Test resetting a custom inverse to the model's default inverse."""
class TestModel(Model):
n_inputs = 0
outputs = ("y",)
@property
def inverse(self):
return models.Shift()
@staticmethod
def evaluate():
return 0
# The above test model has no meaning, nor does its inverse--this just
# tests that setting an inverse and resetting to the default inverse works
m = TestModel()
assert isinstance(m.inverse, models.Shift)
m.inverse = models.Scale()
assert isinstance(m.inverse, models.Scale)
del m.inverse
assert isinstance(m.inverse, models.Shift)
def test_render_model_2d():
imshape = (71, 141)
image = np.zeros(imshape)
coords = y, x = np.indices(imshape)
model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)
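    # Render the model at edge and sub-pixel center positions and compare the
    # rendered image against direct evaluation on the coordinate grid.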
# test points for edges
ye, xe = [0, 35, 70], [0, 70, 140]
# test points for floating point positions
yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]
test_pts = [(a, b) for a in xe for b in ye]
test_pts += [(a, b) for a in xf for b in yf]
for x0, y0 in test_pts:
model.x_mean = x0
model.y_mean = y0
expected = model(x, y)
for xy in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (xy is None):
# this case is tested in Fittable2DModelTester
continue
actual = model.render(out=im, coords=xy)
if im is None:
assert_allclose(actual, model.render(coords=xy))
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, y0) == (70, 35):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
    # test an error is raised when the bounding box is larger than the input array
    with pytest.raises(ValueError):
        model.render(out=np.zeros((1, 1)))
def test_render_model_1d():
npix = 101
image = np.zeros(npix)
coords = np.arange(npix)
model = models.Gaussian1D()
# test points
test_pts = [0, 49.1, 49.5, 49.9, 100]
# test widths
test_stdv = np.arange(5.5, 6.7, 0.2)
for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
model.mean = x0
model.stddev = stdv
expected = model(coords)
for x in [coords, None]:
for im in [image.copy(), None]:
if (im is None) & (x is None):
# this case is tested in Fittable1DModelTester
continue
actual = model.render(out=im, coords=x)
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, stdv) == (49.5, 5.5):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
@pytest.mark.filterwarnings("ignore:invalid value encountered in less")
def test_render_model_3d():
imshape = (17, 21, 27)
image = np.zeros(imshape)
coords = np.indices(imshape)
def ellipsoid(x, y, z, x0=13.0, y0=10.0, z0=8.0, a=4.0, b=3.0, c=2.0, amp=1.0):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(custom_model(ellipsoid)):
@property
def bounding_box(self):
return (
(self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a),
)
model = Ellipsoid3D()
# test points for edges
ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
# test points for floating point positions
zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]
test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
test_pts += [(x, y, z) for x in xf for y in yf for z in zf]
for x0, y0, z0 in test_pts:
model.x0 = x0
model.y0 = y0
model.z0 = z0
expected = model(*coords[::-1])
for c in [coords, None]:
for im in [image.copy(), None]:
if (im is None) & (c is None):
continue
actual = model.render(out=im, coords=c)
boxed = model.render()
# assert images match
assert_allclose(expected, actual)
# assert model fully captured
if (z0, y0, x0) == (8, 10, 13):
boxed = model.render()
assert (np.sum(expected) - np.sum(boxed)) == 0
def test_render_model_out_dtype():
"""Test different out.dtype for model.render."""
MESSAGE = (
r"Cannot cast ufunc 'add' output from .* to .* with casting rule 'same_kind"
)
for model in [models.Gaussian2D(), models.Gaussian2D() + models.Planar2D()]:
for dtype in [np.float64, np.float32, np.complex64]:
im = np.zeros((40, 40), dtype=dtype)
imout = model.render(out=im)
assert imout is im
assert imout.sum() != 0
with pytest.raises(TypeError, match=MESSAGE):
im = np.zeros((40, 40), dtype=np.int32)
imout = model.render(out=im)
def test_custom_bounding_box_1d():
"""
Tests that the bounding_box setter works.
"""
# 1D models
g1 = models.Gaussian1D()
bb = g1.bounding_box
expected = g1.render()
# assign the same bounding_box, now through the bounding_box setter
g1.bounding_box = bb
assert_allclose(g1.render(), expected)
# 2D models
g2 = models.Gaussian2D()
bb = g2.bounding_box
expected = g2.render()
# assign the same bounding_box, now through the bounding_box setter
g2.bounding_box = bb
assert_allclose(g2.render(), expected)
def test_n_submodels_in_single_models():
assert models.Gaussian1D().n_submodels == 1
assert models.Gaussian2D().n_submodels == 1
def test_compound_deepcopy():
model = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3)
new_model = model.deepcopy()
assert id(model) != id(new_model)
assert id(model._leaflist) != id(new_model._leaflist)
assert id(model[0]) != id(new_model[0])
assert id(model[1]) != id(new_model[1])
assert id(model[2]) != id(new_model[2])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_units_with_bounding_box():
points = np.arange(10, 20)
table = np.arange(10) * u.Angstrom
t = models.Tabular1D(points, lookup_table=table)
assert isinstance(t(10), u.Quantity)
assert isinstance(t(10, with_bounding_box=True), u.Quantity)
assert_quantity_allclose(t(10), t(10, with_bounding_box=True))
RENAMED_MODEL = models.Gaussian1D.rename("CustomGaussian")
MODEL_RENAME_CODE = """
from astropy.modeling.models import Gaussian1D
print(repr(Gaussian1D))
print(repr(Gaussian1D.rename('CustomGaussian')))
""".strip()
MODEL_RENAME_EXPECTED = b"""
<class 'astropy.modeling.functional_models.Gaussian1D'>
Name: Gaussian1D
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
<class '__main__.CustomGaussian'>
Name: CustomGaussian (Gaussian1D)
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
""".strip()
def test_rename_path(tmp_path):
# Regression test for a bug that caused the path to the class to be
# incorrect in a renamed model's __repr__.
assert (
repr(RENAMED_MODEL).splitlines()[0]
== "<class 'astropy.modeling.tests.test_core.CustomGaussian'>"
)
# Make sure that when called from a user script, the class name includes
# __main__.
env = os.environ.copy()
paths = [os.path.dirname(astropy.__path__[0])] + sys.path
env["PYTHONPATH"] = os.pathsep.join(paths)
script = tmp_path / "rename.py"
with open(script, "w") as f:
f.write(MODEL_RENAME_CODE)
output = subprocess.check_output([sys.executable, script], env=env)
assert output.splitlines() == MODEL_RENAME_EXPECTED.splitlines()
@pytest.mark.parametrize(
"model_class",
[models.Gaussian1D, models.Polynomial1D, models.Shift, models.Tabular1D],
)
def test_rename_1d(model_class):
new_model = model_class.rename(name="Test1D")
assert new_model.name == "Test1D"
@pytest.mark.parametrize(
"model_class", [models.Gaussian2D, models.Polynomial2D, models.Tabular2D]
)
def test_rename_2d(model_class):
new_model = model_class.rename(name="Test2D")
assert new_model.name == "Test2D"
def test_fix_inputs_integer():
"""
Tests that numpy integers can be passed as dictionary keys to fix_inputs
Issue #11358
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {1: 22})
assert mf(1) == (1, 22)
mf_int32 = models.fix_inputs(m, {np.int32(1): 33})
assert mf_int32(1) == (1, 33)
mf_int64 = models.fix_inputs(m, {np.int64(1): 44})
assert mf_int64(1) == (1, 44)
def test_fix_inputs_empty_dict():
"""
Tests that empty dictionary can be passed to fix_inputs
Issue #11355
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {})
assert mf(1, 2) == (1, 2)
def test_rename_inputs_outputs():
g2 = models.Gaussian2D(10, 2, 3, 1, 2)
assert g2.inputs == ("x", "y")
assert g2.outputs == ("z",)
MESSAGE = r"Expected .* number of .*, got .*"
with pytest.raises(ValueError, match=MESSAGE):
g2.inputs = ("w",)
with pytest.raises(ValueError, match=MESSAGE):
g2.outputs = ("w", "e")
def test__prepare_output_single_model():
model = models.Gaussian1D()
# No broadcast
assert (
np.array([1, 2]) == model._prepare_output_single_model(np.array([1, 2]), None)
).all()
# Broadcast to scalar
assert 1 == model._prepare_output_single_model(np.array([1]), ())
assert 2 == model._prepare_output_single_model(np.asanyarray(2), ())
# Broadcast reshape
output = np.array([[1, 2, 3], [4, 5, 6]])
reshape = np.array([[1, 2], [3, 4], [5, 6]])
assert (output == model._prepare_output_single_model(output, (2, 3))).all()
assert (reshape == model._prepare_output_single_model(output, (3, 2))).all()
# Broadcast reshape scalar
assert 1 == model._prepare_output_single_model(np.array([1]), (1, 2))
assert 2 == model._prepare_output_single_model(np.asanyarray(2), (3, 4))
# Fail to broadcast
assert (output == model._prepare_output_single_model(output, (1, 2))).all()
assert (output == model._prepare_output_single_model(output, (3, 4))).all()
def test_prepare_outputs_mixed_broadcast():
"""
Tests that _prepare_outputs_single_model does not fail when a smaller
array is passed as first input, but output is broadcast to larger
array.
Issue #10170
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model([1, 2], 3)
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.9692332344763441, 1.0])
output = model(4, [5, 6])
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.8146473164114145, 0.7371233743916278])
def test_prepare_outputs_complex_reshape():
x = np.array(
[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
]
)
y = np.array(
[
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
[26, 27, 28, 29, 30],
]
)
m = models.Identity(3) | models.Mapping((2, 1, 0))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((2, 1), n_inputs=3)
output = mf(1, 2)
assert output == (22, 2, 1)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
m = models.Identity(3) | models.Mapping((0, 1, 2))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((0, 1), n_inputs=3)
output = mf(1, 2)
assert output == (1, 2, 22)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
def test_prepare_outputs_single_entry_vector():
"""
jwst and gwcs both require that single entry vectors produce single
entry output vectors, not scalars. This tests for that behavior.
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model(np.array([1]), np.array([2]))
assert output.shape == (1,)
np.testing.assert_allclose(output, [0.9500411305585278])
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings("ignore: Using a non-tuple")
def test_prepare_outputs_sparse_grid():
"""
Test to show that #11060 has been solved.
"""
shape = (3, 3)
    data = np.arange(np.prod(shape)).reshape(shape) * u.m / u.s
points_unit = u.pix
points = [np.arange(size) * points_unit for size in shape]
kwargs = {
"bounds_error": False,
"fill_value": np.nan,
"method": "nearest",
}
transform = models.Tabular2D(points, data, **kwargs)
truth = (
np.array(
[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
]
)
* u.m
/ u.s
)
points = np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=True)
x = points[0] * u.pix
y = points[1] * u.pix
value = transform(x, y)
assert (value == truth).all()
points = (
np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=False) * u.pix
)
value = transform(*points)
assert (value == truth).all()
def test_coerce_units():
model = models.Polynomial1D(1, c0=1, c1=2)
MESSAGE = r"Can only apply 'add' function to dimensionless quantities when other .*"
with pytest.raises(u.UnitsError, match=MESSAGE):
model(u.Quantity(10, u.m))
with_input_units = model.coerce_units({"x": u.m})
result = with_input_units(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_input_units_tuple = model.coerce_units((u.m,))
result = with_input_units_tuple(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_return_units = model.coerce_units(return_units={"y": u.s})
result = with_return_units(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_return_units_tuple = model.coerce_units(return_units=(u.s,))
result = with_return_units_tuple(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_both = model.coerce_units({"x": u.m}, {"y": u.s})
result = with_both(u.Quantity(10, u.m))
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with pytest.raises(
ValueError, match=r"input_units keys.*do not match model inputs"
):
model.coerce_units({"q": u.m})
with pytest.raises(ValueError, match=r"input_units length does not match n_inputs"):
model.coerce_units((u.m, u.s))
model_with_existing_input_units = models.BlackBody()
with pytest.raises(
ValueError,
match=r"Cannot specify input_units for model with existing input units",
):
model_with_existing_input_units.coerce_units({"x": u.m})
with pytest.raises(
ValueError, match=r"return_units keys.*do not match model outputs"
):
model.coerce_units(return_units={"q": u.m})
with pytest.raises(
ValueError, match=r"return_units length does not match n_outputs"
):
model.coerce_units(return_units=(u.m, u.s))
def test_bounding_box_general_inverse():
model = NonFittableModel(42.5)
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
model.bounding_box = ()
assert model.bounding_box.bounding_box() == ()
model.inverse = NonFittableModel(3.14)
inverse_model = model.inverse
with pytest.raises(NotImplementedError, match=MESSAGE):
inverse_model.bounding_box
def test__add_special_operator():
sop_name = "name"
sop = "value"
key = _add_special_operator(sop_name, "value")
assert key[0] == sop_name
assert key[1] == SPECIAL_OPERATORS._unique_id
assert key in SPECIAL_OPERATORS
assert SPECIAL_OPERATORS[key] == sop
def test_print_special_operator_CompoundModel(capsys):
"""
Test that issue #11310 has been fixed
"""
model = convolve_models(models.Sersic2D(), models.Gaussian2D())
with astropy.conf.set_temp("max_width", 80):
# fmt: off
assert str(model) == (
"Model: CompoundModel\n"
"Inputs: ('x', 'y')\n"
"Outputs: ('z',)\n"
"Model set size: 1\n"
"Expression: convolve_fft (([0]), ([1]))\n"
"Components: \n"
" [0]: <Sersic2D(amplitude=1., r_eff=1., n=4., "
"x_0=0., y_0=0., ellip=0., theta=0.)>\n"
"\n"
" [1]: <Gaussian2D(amplitude=1., x_mean=0., y_mean=0., "
"x_stddev=1., y_stddev=1., theta=0.)>\n"
"Parameters:\n"
" amplitude_0 r_eff_0 n_0 x_0_0 y_0_0 ... y_mean_1 x_stddev_1 y_stddev_1 theta_1\n"
" ----------- ------- --- ----- ----- ... -------- ---------- ---------- -------\n"
" 1.0 1.0 4.0 0.0 0.0 ... 0.0 1.0 1.0 0.0"
)
# fmt: on
def test__validate_input_shape():
model = models.Gaussian1D()
model._n_models = 2
_input = np.array(
[
[1, 2, 3],
[4, 5, 6],
]
)
# Successful validation
assert model._validate_input_shape(_input, 0, model.inputs, 1, False) == (2, 3)
# Fail number of axes
MESSAGE = r"For model_set_axis=2, all inputs must be at least 3-dimensional"
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, model.inputs, 2, True)
# Fail number of models (has argname)
MESSAGE = r"Input argument '.*' does not have the correct dimensions in .*"
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, model.inputs, 1, True)
# Fail number of models (no argname)
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shape(_input, 0, [], 1, True)
def test__validate_input_shapes():
model = models.Gaussian1D()
model._n_models = 2
inputs = [mk.MagicMock() for _ in range(3)]
argnames = mk.MagicMock()
model_set_axis = mk.MagicMock()
all_shapes = [mk.MagicMock() for _ in inputs]
# Successful validation
with mk.patch.object(
Model, "_validate_input_shape", autospec=True, side_effect=all_shapes
) as mkValidate:
with mk.patch.object(core, "check_broadcast", autospec=True) as mkCheck:
assert mkCheck.return_value == model._validate_input_shapes(
inputs, argnames, model_set_axis
)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == [
mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)
]
# Fail check_broadcast
MESSAGE = r"All inputs must have identical shapes or must be scalars"
with mk.patch.object(
Model, "_validate_input_shape", autospec=True, side_effect=all_shapes
) as mkValidate:
with mk.patch.object(
core, "check_broadcast", autospec=True, return_value=None
) as mkCheck:
with pytest.raises(ValueError, match=MESSAGE):
model._validate_input_shapes(inputs, argnames, model_set_axis)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == [
mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)
]
def test__remove_axes_from_shape():
model = models.Gaussian1D()
# len(shape) == 0
assert model._remove_axes_from_shape((), mk.MagicMock()) == ()
# axis < 0
assert model._remove_axes_from_shape((1, 2, 3), -1) == (1, 2)
assert model._remove_axes_from_shape((1, 2, 3), -2) == (1, 3)
assert model._remove_axes_from_shape((1, 2, 3), -3) == (2, 3)
# axis >= len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 3) == ()
assert model._remove_axes_from_shape((1, 2, 3), 4) == ()
# 0 <= axis < len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 0) == (2, 3)
assert model._remove_axes_from_shape((1, 2, 3), 1) == (3,)
assert model._remove_axes_from_shape((1, 2, 3), 2) == ()
def test_get_bounding_box():
model = models.Const2D(2)
# No with_bbox
assert model.get_bounding_box(False) is None
# No bounding_box
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
model.bounding_box
assert model.get_bounding_box(True) is None
# Normal bounding_box
model.bounding_box = ((0, 1), (0, 1))
assert not isinstance(model.bounding_box, CompoundBoundingBox)
assert model.get_bounding_box(True) == ((0, 1), (0, 1))
# CompoundBoundingBox with no removal
bbox = CompoundBoundingBox.validate(
model,
{(1,): ((-1, 0), (-1, 0)), (2,): ((0, 1), (0, 1))},
selector_args=[("y", False)],
)
model.bounding_box = bbox
assert isinstance(model.bounding_box, CompoundBoundingBox)
# Get using argument not with_bbox
assert model.get_bounding_box(True) == bbox
# Get using with_bbox not argument
assert model.get_bounding_box((1,)) == ((-1, 0), (-1, 0))
assert model.get_bounding_box((2,)) == ((0, 1), (0, 1))
def test_compound_bounding_box():
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox1 = CompoundBoundingBox.validate(
model, {(1,): (-1, 0), (2,): (0, 1)}, selector_args=[("x", False)]
)
bbox2 = CompoundBoundingBox.validate(
model, {(-0.5,): (-1, 0), (0.5,): (0, 1)}, selector_args=[("x", False)]
)
# Using with_bounding_box to pass a selector
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=(1,)) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=(2,)) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
# Using argument value to pass bounding_box
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
MESSAGE = r"No bounding box is defined for selector: .*"
with pytest.raises(RuntimeError, match=MESSAGE):
model(0, with_bounding_box=True)
model1 = models.Gaussian1D()
truth1 = models.Gaussian1D()
model2 = models.Const1D(2)
truth2 = models.Const1D(2)
model = model1 + model2
truth = truth1 + truth2
assert isinstance(model, CompoundModel)
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=1) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=2) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
with pytest.raises(RuntimeError, match=MESSAGE):
model(0, with_bounding_box=True)
def test_bind_bounding_box():
model = models.Polynomial2D(3)
bbox = ((-1, 1), (-2, 2))
bind_bounding_box(model, bbox)
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box["x"] == (-2, 2)
assert model.bounding_box["y"] == (-1, 1)
bind_bounding_box(model, bbox, order="F")
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box["x"] == (-1, 1)
assert model.bounding_box["y"] == (-2, 2)
def test_bind_compound_bounding_box_using_with_bounding_box_select():
"""
    This demonstrates how to bind multiple bounding_boxes which are
    selectable using ``with_bounding_box``; note that there must be a
    fall-back to the implicit selector.
"""
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox = (0, 1)
MESSAGE = r"'tuple' object has no attribute 'items"
with pytest.raises(AttributeError, match=MESSAGE):
bind_compound_bounding_box(model, bbox, "x")
bbox = {0: (-1, 0), 1: (0, 1)}
bind_compound_bounding_box(model, bbox, [("x", False)])
# No bounding box
assert model(-0.5) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0) == truth(0)
assert model(1) == truth(1)
# `with_bounding_box` selects as `-0.5` will not be a key
assert model(-0.5, with_bounding_box=0) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=1))
# `with_bounding_box` selects as `0.5` will not be a key
assert model(0.5, with_bounding_box=1) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(0,)))
# Fall back onto implicit selector
assert model(0, with_bounding_box=True) == truth(0)
assert model(1, with_bounding_box=True) == truth(1)
# Attempt to fall-back on implicit selector, but no bounding_box
MESSAGE = r"No bounding box is defined for selector: .*"
with pytest.raises(RuntimeError, match=MESSAGE):
model(0.5, with_bounding_box=True)
# Override implicit selector
assert np.isnan(model(1, with_bounding_box=0))
def test_fix_inputs_compound_bounding_box():
base_model = models.Gaussian2D(1, 2, 3, 4, 5)
bbox = {2.5: (-1, 1), 3.14: (-7, 3)}
model = fix_inputs(base_model, {"y": 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {"x": 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"y": 2.5}, bounding_boxes=bbox, selector_args=(("y", True),)
)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"x": 2.5}, bounding_boxes=bbox, selector_args=(("x", True),)
)
assert model.bounding_box == (-1, 1)
model = fix_inputs(
base_model, {"x": 2.5}, bounding_boxes=bbox, selector_args=((0, True),)
)
assert model.bounding_box == (-1, 1)
base_model = models.Identity(4)
bbox = {(2.5, 1.3): ((-1, 1), (-3, 3)), (2.5, 2.71): ((-3, 3), (-1, 1))}
model = fix_inputs(base_model, {"x0": 2.5, "x1": 1.3}, bounding_boxes=bbox)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(
base_model,
{"x0": 2.5, "x1": 1.3},
bounding_boxes=bbox,
selector_args=(("x0", True), ("x1", True)),
)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(
base_model,
{"x0": 2.5, "x1": 1.3},
bounding_boxes=bbox,
selector_args=((0, True), (1, True)),
)
assert model.bounding_box == ((-1, 1), (-3, 3))
def test_model_copy_with_bounding_box():
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5)), order="F")
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
assert id(interval) != interval_copy
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_with_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = ModelBoundingBox.validate(
model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F"
)
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
assert id(interval) != interval_copy
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_model_copy_with_compound_bounding_box():
model = models.Polynomial2D(2)
bbox = {(0,): (-0.5, 1047.5), (1,): (-0.5, 3047.5)}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("x", True)], order="F"
)
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(
model.bounding_box.selector_args
)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[
index
]
assert interval == interval_copy
assert id(interval) != interval_copy
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_with_compound_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)),
}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("slit_id", True)], order="F"
)
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(
model.bounding_box.selector_args
)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[
index
]
assert interval == interval_copy
assert id(interval) != interval_copy
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_user_attribute():
"""Regression test for issue #12370"""
model = models.Gaussian2D(100, 25, 25, 5, 5) | models.Identity(1)
model.xname = "x_mean" # user-defined attribute
assert hasattr(model, "xname")
assert model.xname == "x_mean"
model_copy = model.copy()
model_copy.xname
assert hasattr(model_copy, "xname")
assert model_copy.xname == "x_mean"
def test_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Gaussian2D()
bbox = ModelBoundingBox.validate(model, ((-1, 1), (-np.inf, np.inf)), order="F")
model.bounding_box = bbox
x = np.array([-0.5, 0.5])
y = 0
    # Everything works when it's all in the bounding box
assert (model(x, y) == (model(x, y, with_bounding_box=True))).all()
def test_compound_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = ModelBoundingBox.validate(
model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F"
)
model.bounding_box = bbox
x = np.array([1000, 1001])
y = np.array([2000, 2001])
slit_id = 0
    # Everything works when it's all in the bounding box
value0 = model(x, y, slit_id)
value1 = model(x, y, slit_id, with_bounding_box=True)
assert_equal(value0, value1)
def test_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,))
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
def test_compound_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,)) | models.Shift(1)
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
def test_bounding_box_pass_with_ignored():
"""Test the possibility of setting ignored variables in bounding box"""
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, (-1, 1), ignored=["y"])
model.bounding_box = bbox
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
model = models.Polynomial2D(2)
bind_bounding_box(model, (-1, 1), ignored=["y"])
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
def test_compound_bounding_box_pass_with_ignored():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = {
(0,): (-0.5, 1047.5),
(1,): (-0.5, 2047.5),
}
cbbox = CompoundBoundingBox.validate(
model, bbox, selector_args=[("slit_id", True)], ignored=["y"], order="F"
)
model.bounding_box = cbbox
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bind_compound_bounding_box(
model, bbox, selector_args=[("slit_id", True)], ignored=["y"], order="F"
)
assert model.bounding_box == cbbox
@pytest.mark.parametrize("int_type", [int, np.int32, np.int64, np.uint32, np.uint64])
def test_model_integer_indexing(int_type):
"""Regression for PR 12561; verify that compound model components
can be accessed by integer index"""
gauss = models.Gaussian2D()
airy = models.AiryDisk2D()
compound = gauss + airy
assert compound[int_type(0)] == gauss
assert compound[int_type(1)] == airy
def test_model_string_indexing():
"""Regression for PR 12561; verify that compound model components
can be accessed by indexing with model name"""
gauss = models.Gaussian2D()
gauss.name = "Model1"
airy = models.AiryDisk2D()
airy.name = "Model2"
compound = gauss + airy
assert compound["Model1"] == gauss
assert compound["Model2"] == airy
|
53617f41d82747529254061dc7fb7322c65e28645244caae18facce7826cb5da | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for different kinds of WCSAxes instances
"""
import numpy as np
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredEllipse, AnchoredSizeBar
import astropy.units as u
from astropy.wcs.utils import proj_plane_pixel_scales
__all__ = ["add_beam", "add_scalebar"]
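# Map the human-readable corner names accepted by ``add_beam`` and
# ``add_scalebar`` to the integer location codes used by matplotlib's
# anchored artists (the same convention as legend ``loc`` codes).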
CORNERS = {
"top right": 1,
"top left": 2,
"bottom left": 3,
"bottom right": 4,
"right": 5,
"left": 6,
"bottom": 8,
"top": 9,
}
def add_beam(
ax,
header=None,
major=None,
minor=None,
angle=None,
corner="bottom left",
frame=False,
borderpad=0.4,
pad=0.5,
**kwargs,
):
"""
Display the beam shape and size
Parameters
----------
ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
WCSAxes instance in which the beam shape and size is displayed. The WCS
must be celestial.
header : :class:`~astropy.io.fits.Header`, optional
Header containing the beam parameters. If specified, the ``BMAJ``,
``BMIN``, and ``BPA`` keywords will be searched in the FITS header
to set the major and minor axes and the position angle on the sky.
major : float or :class:`~astropy.units.Quantity`, optional
Major axis of the beam in degrees or an angular quantity.
    minor : float or :class:`~astropy.units.Quantity`, optional
Minor axis of the beam in degrees or an angular quantity.
angle : float or :class:`~astropy.units.Quantity`, optional
Position angle of the beam on the sky in degrees or an angular
quantity in the anticlockwise direction.
corner : str, optional
The beam location. Acceptable values are ``'left'``, ``'right'``,
        ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``, ``'bottom left'``
(default), and ``'bottom right'``.
frame : bool, optional
Whether to display a frame behind the beam (default is ``False``).
borderpad : float, optional
Border padding, in fraction of the font size. Default is 0.4.
pad : float, optional
Padding around the beam, in fraction of the font size. Default is 0.5.
kwargs
Additional arguments are passed to :class:`matplotlib.patches.Ellipse`.
Notes
-----
This function may be inaccurate when:
- The pixel scales at the reference pixel are different from the pixel scales
within the image extent (e.g., when the reference pixel is well outside of
the image extent and the projection is non-linear)
- The pixel scales in the two directions are very different from each other
(e.g., rectangular pixels)
"""
if header and major:
raise ValueError(
"Either header or major/minor/angle must be specified, not both."
)
if header:
major = header["BMAJ"]
minor = header["BMIN"]
angle = header["BPA"]
if isinstance(major, u.Quantity):
major = major.to(u.degree).value
if isinstance(minor, u.Quantity):
minor = minor.to(u.degree).value
if isinstance(angle, u.Quantity):
angle = angle.to(u.degree).value
if ax.wcs.is_celestial:
pix_scale = proj_plane_pixel_scales(ax.wcs)
sx = pix_scale[0]
sy = pix_scale[1]
degrees_per_pixel = np.sqrt(sx * sy)
else:
raise ValueError("Cannot show beam when WCS is not celestial")
minor /= degrees_per_pixel
major /= degrees_per_pixel
corner = CORNERS[corner]
beam = AnchoredEllipse(
ax.transData,
width=minor,
height=major,
angle=angle,
loc=corner,
pad=pad,
borderpad=borderpad,
frameon=frame,
)
beam.ellipse.set(**kwargs)
ax.add_artist(beam)
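# A minimal usage sketch for ``add_beam`` (illustrative only; ``fig``, ``wcs``
# and ``hdr`` are hypothetical stand-ins for a Matplotlib figure, a celestial
# WCS and a FITS header carrying BMAJ/BMIN/BPA):
#
#     ax = fig.add_subplot(projection=wcs)
#     add_beam(ax, header=hdr, frame=True)          # beam taken from the header
#     add_beam(ax, major=1.2 * u.arcsec, minor=0.8 * u.arcsec,
#              angle=30 * u.deg, corner="bottom right")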
def add_scalebar(
ax,
length,
label=None,
corner="bottom right",
frame=False,
borderpad=0.4,
pad=0.5,
**kwargs,
):
"""Add a scale bar
Parameters
----------
ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
WCSAxes instance in which the scale bar is displayed. The WCS must be
celestial.
length : float or :class:`~astropy.units.Quantity`
The length of the scalebar in degrees or an angular quantity
label : str, optional
Label to place below the scale bar
corner : str, optional
        Where to place the scale bar. Acceptable values are ``'left'``,
``'right'``, ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``,
``'bottom left'`` and ``'bottom right'`` (default)
frame : bool, optional
Whether to display a frame behind the scale bar (default is ``False``)
borderpad : float, optional
Border padding, in fraction of the font size. Default is 0.4.
pad : float, optional
Padding around the scale bar, in fraction of the font size. Default is 0.5.
kwargs
Additional arguments are passed to
:class:`mpl_toolkits.axes_grid1.anchored_artists.AnchoredSizeBar`.
Notes
-----
This function may be inaccurate when:
- The pixel scales at the reference pixel are different from the pixel scales
within the image extent (e.g., when the reference pixel is well outside of
the image extent and the projection is non-linear)
- The pixel scales in the two directions are very different from each other
(e.g., rectangular pixels)
"""
if isinstance(length, u.Quantity):
length = length.to(u.degree).value
if ax.wcs.is_celestial:
pix_scale = proj_plane_pixel_scales(ax.wcs)
sx = pix_scale[0]
sy = pix_scale[1]
degrees_per_pixel = np.sqrt(sx * sy)
else:
raise ValueError("Cannot show scalebar when WCS is not celestial")
length = length / degrees_per_pixel
corner = CORNERS[corner]
scalebar = AnchoredSizeBar(
ax.transData,
length,
label,
corner,
pad=pad,
borderpad=borderpad,
sep=5,
frameon=frame,
**kwargs,
)
ax.add_artist(scalebar)
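# A minimal usage sketch for ``add_scalebar`` (illustrative only; ``ax`` is a
# hypothetical WCSAxes instance whose WCS is celestial):
#
#     add_scalebar(ax, 1 * u.arcmin, label="1'", corner="top left")
#     add_scalebar(ax, 0.01, label='36"', frame=True)  # a bare float is degrees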
|
9e46ea0ca6fd6a2604768240e2d0c5a94115c2cacb72ee50b9be3bc5fd830610 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Arithmetic mixin to the NDData class.
import warnings
from copy import deepcopy
import numpy as np
from astropy.nddata.nduncertainty import NDUncertainty
from astropy.units import dimensionless_unscaled
from astropy.utils import format_doc, sharedmethod
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["NDArithmeticMixin"]
# Global so it doesn't pollute the class dict unnecessarily:
# Docstring templates for add, subtract, multiply, divide methods.
_arit_doc = """
Performs {name} by evaluating ``self`` {op} ``operand``.
Parameters
----------
operand, operand2 : `NDData`-like instance
If ``operand2`` is ``None`` or not given it will perform the operation
``self`` {op} ``operand``.
If ``operand2`` is given it will perform ``operand`` {op} ``operand2``.
If the method was called on a class rather than on the instance
``operand2`` must be given.
propagate_uncertainties : `bool` or ``None``, optional
If ``None`` the result will have no uncertainty. If ``False`` the
result will have a copied version of the first operand that has an
uncertainty. If ``True`` the result will have a correctly propagated
uncertainty from the uncertainties of the operands but this assumes
that the uncertainties are `NDUncertainty`-like. Default is ``True``.
.. versionchanged:: 1.2
This parameter must be given as keyword-parameter. Using it as
positional parameter is deprecated.
``None`` was added as valid parameter value.
handle_mask : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no mask. If ``'first_found'`` the
result will have a copied version of the first operand that has a
        mask. If it is a callable then the specified callable must
        create the result's ``mask`` and if necessary provide a copy.
Default is `numpy.logical_or`.
.. versionadded:: 1.2
handle_meta : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no meta. If ``'first_found'`` the
result will have a copied version of the first operand that has a
(not empty) meta. If it is a callable then the specified callable must
        create the result's ``meta`` and if necessary provide a copy.
Default is ``None``.
.. versionadded:: 1.2
compare_wcs : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no wcs and no comparison between
the wcs of the operands is made. If ``'first_found'`` the
result will have a copied version of the first operand that has a
wcs. If it is a callable then the specified callable must
        compare the ``wcs``. The resulting ``wcs`` is taken from the first
        operand, and a ``ValueError`` is raised if the comparison was not
        successful. Default is ``'first_found'``.
.. versionadded:: 1.2
uncertainty_correlation : number or `~numpy.ndarray`, optional
The correlation between the two operands is used for correct error
propagation for correlated data as given in:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
Default is 0.
.. versionadded:: 1.2
kwargs :
Any other parameter that should be passed to the callables used.
Returns
-------
result : `~astropy.nddata.NDData`-like
The resulting dataset
Notes
-----
If a ``callable`` is used for ``mask``, ``wcs`` or ``meta`` the
callable must accept the corresponding attributes as first two
    parameters. If the callable also needs additional parameters these can be
    defined as ``kwargs`` and must start with ``"wcs_"`` (for wcs callable) or
    ``"meta_"`` (for meta callable). This prefix is removed before the
    callable is called.
``"first_found"`` can also be abbreviated with ``"ff"``.
"""
class NDArithmeticMixin:
"""
Mixin class to add arithmetic to an NDData object.
When subclassing, be sure to list the superclasses in the correct order
so that the subclass sees NDData as the main superclass. See
`~astropy.nddata.NDDataArray` for an example.
Notes
-----
This class only aims at covering the most common cases so there are certain
restrictions on the saved attributes::
- ``uncertainty`` : has to be something that has a `NDUncertainty`-like
interface for uncertainty propagation
- ``mask`` : has to be something that can be used by a bitwise ``or``
operation.
- ``wcs`` : has to implement a way of comparing with ``=`` to allow
the operation.
    But there is a workaround that allows disabling the handling of a
    specific attribute and instead simply setting the result's attribute to
    ``None`` or copying the existing attribute (and neglecting the other).
For example for uncertainties not representing an `NDUncertainty`-like
interface you can alter the ``propagate_uncertainties`` parameter in
:meth:`NDArithmeticMixin.add`. ``None`` means that the result will have no
uncertainty, ``False`` means it takes the uncertainty of the first operand
(if this does not exist from the second operand) as the result's
uncertainty. This behavior is also explained in the docstring for the
different arithmetic operations.
    Decomposing the units is not attempted, mainly due to the internal mechanics
    of `~astropy.units.Quantity`, so the resulting data might have units like
    ``km/m`` if you divided, for example, 100 km by 5 m. This Mixin simply
    adopts that behavior.
Examples
--------
Using this Mixin with `~astropy.nddata.NDData`:
>>> from astropy.nddata import NDData, NDArithmeticMixin
>>> class NDDataWithMath(NDArithmeticMixin, NDData):
... pass
Using it with one operand on an instance::
>>> ndd = NDDataWithMath(100)
>>> ndd.add(20)
NDDataWithMath(120)
    Using it with two operands on an instance::
>>> ndd = NDDataWithMath(-4)
>>> ndd.divide(1, ndd)
NDDataWithMath(-0.25)
Using it as classmethod requires two operands::
>>> NDDataWithMath.subtract(5, 4)
NDDataWithMath(1)
"""
def _arithmetic(
self,
operation,
operand,
propagate_uncertainties=True,
handle_mask=np.logical_or,
handle_meta=None,
uncertainty_correlation=0,
compare_wcs="first_found",
**kwds,
):
"""
Base method which calculates the result of the arithmetic operation.
This method determines the result of the arithmetic operation on the
``data`` including their units and then forwards to other methods
to calculate the other properties for the result (like uncertainty).
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide`.
operand : same type (class) as self
see :meth:`NDArithmeticMixin.add`
propagate_uncertainties : `bool` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
handle_mask : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
handle_meta : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
compare_wcs : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
uncertainty_correlation : ``Number`` or `~numpy.ndarray`, optional
see :meth:`NDArithmeticMixin.add`
kwargs :
Any other parameter that should be passed to the
different :meth:`NDArithmeticMixin._arithmetic_mask` (or wcs, ...)
methods.
Returns
-------
result : ndarray or `~astropy.units.Quantity`
The resulting data as array (in case both operands were without
unit) or as quantity if at least one had a unit.
kwargs : `dict`
The kwargs should contain all the other attributes (besides data
and unit) needed to create a new instance for the result. Creating
the new instance is up to the calling method, for example
:meth:`NDArithmeticMixin.add`.
"""
# Find the appropriate keywords for the appropriate method (not sure
# if data and uncertainty are ever used ...)
kwds2 = {"mask": {}, "meta": {}, "wcs": {}, "data": {}, "uncertainty": {}}
for i in kwds:
splitted = i.split("_", 1)
try:
kwds2[splitted[0]][splitted[1]] = kwds[i]
except KeyError:
raise KeyError(f"Unknown prefix {splitted[0]} for parameter {i}")
kwargs = {}
# First check that the WCS allows the arithmetic operation
if compare_wcs is None:
kwargs["wcs"] = None
elif compare_wcs in ["ff", "first_found"]:
if self.wcs is None:
kwargs["wcs"] = deepcopy(operand.wcs)
else:
kwargs["wcs"] = deepcopy(self.wcs)
else:
kwargs["wcs"] = self._arithmetic_wcs(
operation, operand, compare_wcs, **kwds2["wcs"]
)
# Then calculate the resulting data (which can but not needs to be a
# quantity)
result = self._arithmetic_data(operation, operand, **kwds2["data"])
# Determine the other properties
if propagate_uncertainties is None:
kwargs["uncertainty"] = None
elif not propagate_uncertainties:
if self.uncertainty is None:
kwargs["uncertainty"] = deepcopy(operand.uncertainty)
else:
kwargs["uncertainty"] = deepcopy(self.uncertainty)
else:
kwargs["uncertainty"] = self._arithmetic_uncertainty(
operation,
operand,
result,
uncertainty_correlation,
**kwds2["uncertainty"],
)
# If both are None, there is nothing to do.
if self.psf is not None or operand.psf is not None:
warnings.warn(
f"Not setting psf attribute during {operation.__name__}.",
AstropyUserWarning,
)
if handle_mask is None:
kwargs["mask"] = None
elif handle_mask in ["ff", "first_found"]:
if self.mask is None:
kwargs["mask"] = deepcopy(operand.mask)
else:
kwargs["mask"] = deepcopy(self.mask)
else:
kwargs["mask"] = self._arithmetic_mask(
operation, operand, handle_mask, **kwds2["mask"]
)
if handle_meta is None:
kwargs["meta"] = None
elif handle_meta in ["ff", "first_found"]:
if not self.meta:
kwargs["meta"] = deepcopy(operand.meta)
else:
kwargs["meta"] = deepcopy(self.meta)
else:
kwargs["meta"] = self._arithmetic_meta(
operation, operand, handle_meta, **kwds2["meta"]
)
# Wrap the individual results into a new instance of the same class.
return result, kwargs
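    # A minimal sketch of the keyword conventions handled above (illustrative
    # only; ``ndd1`` and ``ndd2`` stand for two NDData-like operands that both
    # carry a mask and a meta dict):
    #
    #     def merge_meta(meta1, meta2, comment=""):
    #         merged = {**meta1, **meta2}
    #         merged["comment"] = comment
    #         return merged
    #
    #     # ``meta_comment`` loses its ``meta_`` prefix and reaches
    #     # ``merge_meta`` as ``comment``; ``handle_mask`` is applied the
    #     # same way to the two masks.
    #     result = ndd1.add(ndd2, handle_mask=np.logical_and,
    #                       handle_meta=merge_meta, meta_comment="stacked")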
def _arithmetic_data(self, operation, operand, **kwds):
"""
Calculate the resulting data
Parameters
----------
operation : callable
see `NDArithmeticMixin._arithmetic` parameter description.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
kwds :
Additional parameters.
Returns
-------
result_data : ndarray or `~astropy.units.Quantity`
If both operands had no unit the resulting data is a simple numpy
array, but if any of the operands had a unit the return is a
Quantity.
"""
# Do the calculation with or without units
if self.unit is None and operand.unit is None:
result = operation(self.data, operand.data)
elif self.unit is None:
result = operation(
self.data << dimensionless_unscaled, operand.data << operand.unit
)
elif operand.unit is None:
result = operation(
self.data << self.unit, operand.data << dimensionless_unscaled
)
else:
result = operation(self.data << self.unit, operand.data << operand.unit)
return result
def _arithmetic_uncertainty(self, operation, operand, result, correlation, **kwds):
"""
Calculate the resulting uncertainty.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
result : `~astropy.units.Quantity` or `~numpy.ndarray`
The result of :meth:`NDArithmeticMixin._arithmetic_data`.
correlation : number or `~numpy.ndarray`
see :meth:`NDArithmeticMixin.add` parameter description.
kwds :
Additional parameters.
Returns
-------
result_uncertainty : `NDUncertainty` subclass instance or None
The resulting uncertainty already saved in the same `NDUncertainty`
subclass that ``self`` had (or ``operand`` if self had no
uncertainty). ``None`` only if both had no uncertainty.
"""
# Make sure these uncertainties are NDUncertainties so this kind of
# propagation is possible.
if self.uncertainty is not None and not isinstance(
self.uncertainty, NDUncertainty
):
raise TypeError(
"Uncertainty propagation is only defined for "
"subclasses of NDUncertainty."
)
if operand.uncertainty is not None and not isinstance(
operand.uncertainty, NDUncertainty
):
raise TypeError(
"Uncertainty propagation is only defined for "
"subclasses of NDUncertainty."
)
# Now do the uncertainty propagation
# TODO: There is no enforced requirement that actually forbids the
# uncertainty to have negative entries but with correlation the
# sign of the uncertainty DOES matter.
if self.uncertainty is None and operand.uncertainty is None:
# Neither has uncertainties so the result should have none.
return None
elif self.uncertainty is None:
# Create a temporary uncertainty to allow uncertainty propagation
# to yield the correct results. (issue #4152)
self.uncertainty = operand.uncertainty.__class__(None)
result_uncert = self.uncertainty.propagate(
operation, operand, result, correlation
)
# Delete the temporary uncertainty again.
self.uncertainty = None
return result_uncert
elif operand.uncertainty is None:
# As with self.uncertainty is None but the other way around.
operand.uncertainty = self.uncertainty.__class__(None)
result_uncert = self.uncertainty.propagate(
operation, operand, result, correlation
)
operand.uncertainty = None
return result_uncert
else:
# Both have uncertainties so just propagate.
return self.uncertainty.propagate(operation, operand, result, correlation)
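    # A minimal propagation sketch (illustrative only): for uncorrelated
    # operands with ``StdDevUncertainty`` an addition combines the standard
    # deviations in quadrature, e.g.
    #
    #     ndd1 = NDDataRef([1.0, 2.0], uncertainty=StdDevUncertainty([0.3, 0.4]))
    #     ndd2 = NDDataRef([5.0, 6.0], uncertainty=StdDevUncertainty([0.4, 0.3]))
    #     ndd1.add(ndd2).uncertainty.array   # -> [0.5, 0.5]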
def _arithmetic_mask(self, operation, operand, handle_mask, **kwds):
"""
Calculate the resulting mask
This is implemented as the piecewise ``or`` operation if both have a
mask.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
handle_mask : callable
see :meth:`NDArithmeticMixin.add`
kwds :
Additional parameters given to ``handle_mask``.
Returns
-------
result_mask : any type
If only one mask was present this mask is returned.
If neither had a mask ``None`` is returned. Otherwise
``handle_mask`` must create (and copy) the returned mask.
"""
# If only one mask is present we need not bother about any type checks
if self.mask is None and operand.mask is None:
return None
elif self.mask is None:
# Make a copy so there is no reference in the result.
return deepcopy(operand.mask)
elif operand.mask is None:
return deepcopy(self.mask)
else:
# Now lets calculate the resulting mask (operation enforces copy)
return handle_mask(self.mask, operand.mask, **kwds)
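    # A sketch of a custom ``handle_mask`` callable (illustrative only). Any
    # callable taking the two masks, plus optional ``mask_``-prefixed keyword
    # arguments, can replace the default ``np.logical_or``:
    #
    #     def combine_masks(mask1, mask2, threshold=0):
    #         # flag a pixel when the summed mask values exceed ``threshold``
    #         return (np.asanyarray(mask1) + np.asanyarray(mask2)) > threshold
    #
    #     result = ndd1.add(ndd2, handle_mask=combine_masks, mask_threshold=1)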
def _arithmetic_wcs(self, operation, operand, compare_wcs, **kwds):
"""
Calculate the resulting wcs.
There is actually no calculation involved but it is a good place to
        compare the wcs information of both operands. This is currently not
        working properly with `~astropy.wcs.WCS` (which is the suggested class
        for storing the wcs property) but it will not break it either.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData` instance or subclass
The second operand wrapped in an instance of the same class as
self.
compare_wcs : callable
see :meth:`NDArithmeticMixin.add` parameter description.
kwds :
Additional parameters given to ``compare_wcs``.
Raises
------
ValueError
If ``compare_wcs`` returns ``False``.
Returns
-------
result_wcs : any type
The ``wcs`` of the first operand is returned.
"""
# ok, not really arithmetic but we need to check which wcs makes sense
# for the result and this is an ideal place to compare the two WCS,
# too.
# I'll assume that the comparison returned None or False in case they
# are not equal.
if not compare_wcs(self.wcs, operand.wcs, **kwds):
raise ValueError("WCS are not equal.")
return deepcopy(self.wcs)
def _arithmetic_meta(self, operation, operand, handle_meta, **kwds):
"""
Calculate the resulting meta.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
handle_meta : callable
see :meth:`NDArithmeticMixin.add`
kwds :
Additional parameters given to ``handle_meta``.
Returns
-------
result_meta : any type
The result of ``handle_meta``.
"""
# Just return what handle_meta does with both of the metas.
return handle_meta(self.meta, operand.meta, **kwds)
@sharedmethod
@format_doc(_arit_doc, name="addition", op="+")
def add(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(np.add, operand, operand2, **kwargs)
@sharedmethod
@format_doc(_arit_doc, name="subtraction", op="-")
def subtract(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.subtract, operand, operand2, **kwargs
)
@sharedmethod
@format_doc(_arit_doc, name="multiplication", op="*")
def multiply(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.multiply, operand, operand2, **kwargs
)
@sharedmethod
@format_doc(_arit_doc, name="division", op="/")
def divide(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.true_divide, operand, operand2, **kwargs
)
@sharedmethod
def _prepare_then_do_arithmetic(
self_or_cls, operation, operand, operand2, **kwargs
):
"""Intermediate method called by public arithmetic (i.e. ``add``)
before the processing method (``_arithmetic``) is invoked.
.. warning::
Do not override this method in subclasses.
This method checks if it was called as instance or as class method and
then wraps the operands and the result from ``_arithmetic`` in the
appropriate subclass.
Parameters
----------
self_or_cls : instance or class
``sharedmethod`` behaves like a normal method if called on the
instance (then this parameter is ``self``) but like a classmethod
when called on the class (then this parameter is ``cls``).
        operation : callable
The operation (normally a numpy-ufunc) that represents the
appropriate action.
operand, operand2, kwargs :
See for example ``add``.
        Returns
        -------
result : `~astropy.nddata.NDData`-like
            Depending on how this method was called either ``self_or_cls``
(called on class) or ``self_or_cls.__class__`` (called on instance)
is the NDData-subclass that is used as wrapper for the result.
"""
# DO NOT OVERRIDE THIS METHOD IN SUBCLASSES.
if isinstance(self_or_cls, NDArithmeticMixin):
# True means it was called on the instance, so self_or_cls is
# a reference to self
cls = self_or_cls.__class__
if operand2 is None:
# Only one operand was given. Set operand2 to operand and
# operand to self so that we call the appropriate method of the
# operand.
operand2 = operand
operand = self_or_cls
else:
# Convert the first operand to the class of this method.
                # This is important so that the correct _arithmetic method of
                # that class is called later on the converted operand.
operand = cls(operand)
else:
# It was used as classmethod so self_or_cls represents the cls
cls = self_or_cls
# It was called on the class so we expect two operands!
if operand2 is None:
raise TypeError(
"operand2 must be given when the method isn't "
"called on an instance."
)
# Convert to this class. See above comment why.
operand = cls(operand)
# At this point operand, operand2, kwargs and cls are determined.
        # Let's try to convert operand2 to the class of operand to allow for
# arithmetic operations with numbers, lists, numpy arrays, numpy masked
# arrays, astropy quantities, masked quantities and of other subclasses
# of NDData.
operand2 = cls(operand2)
# Now call the _arithmetics method to do the arithmetic.
result, init_kwds = operand._arithmetic(operation, operand2, **kwargs)
# Return a new class based on the result
return cls(result, **init_kwds)
|
9c978b43b71432527a351910ffdf73c99b1e736bf5365660d738c0629d43c58c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from astropy import units as u
from astropy.nddata import NDDataRef
from astropy.nddata import _testing as nd_testing
from astropy.nddata.nduncertainty import (
IncompatibleUncertaintiesException,
InverseVariance,
StdDevUncertainty,
UnknownUncertainty,
VarianceUncertainty,
)
from astropy.units import Quantity, UnitsError
from astropy.utils.exceptions import AstropyUserWarning
from astropy.wcs import WCS
# Alias NDDataAllMixins in case this will be renamed ... :-)
NDDataArithmetic = NDDataRef
class StdDevUncertaintyUncorrelated(StdDevUncertainty):
@property
def supports_correlated(self):
return False
# Test with Data covers:
# scalars, 1D, 2D and 3D
# broadcasting between them
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
("data1", "data2"),
[
(np.array(5), np.array(10)),
(np.array(5), np.arange(10)),
(np.array(5), np.arange(10).reshape(2, 5)),
(np.arange(10), np.ones(10) * 2),
(np.arange(10), np.ones((10, 10)) * 2),
(np.arange(10).reshape(2, 5), np.ones((2, 5)) * 3),
(np.arange(1000).reshape(20, 5, 10), np.ones((20, 5, 10)) * 3),
],
)
def test_arithmetics_data(data1, data2):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
# Addition
nd3 = nd1.add(nd2)
assert_array_equal(data1 + data2, nd3.data)
# Subtraction
nd4 = nd1.subtract(nd2)
assert_array_equal(data1 - data2, nd4.data)
# Multiplication
nd5 = nd1.multiply(nd2)
assert_array_equal(data1 * data2, nd5.data)
# Division
nd6 = nd1.divide(nd2)
assert_array_equal(data1 / data2, nd6.data)
for nd in [nd3, nd4, nd5, nd6]:
# Check that broadcasting worked as expected
if data1.ndim > data2.ndim:
assert data1.shape == nd.data.shape
else:
assert data2.shape == nd.data.shape
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Invalid arithmetic operations for data covering:
# not broadcastable data
def test_arithmetics_data_invalid():
nd1 = NDDataArithmetic([1, 2, 3])
nd2 = NDDataArithmetic([1, 2])
with pytest.raises(ValueError):
nd1.add(nd2)
# Test with Data and unit and covers:
# identical units (even dimensionless unscaled vs. no unit),
# equivalent units (such as meter and kilometer)
# equivalent composite units (such as m/s and km/h)
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
("data1", "data2"),
[
(np.array(5) * u.s, np.array(10) * u.s),
(np.array(5) * u.s, np.arange(10) * u.h),
(np.array(5) * u.s, np.arange(10).reshape(2, 5) * u.min),
(np.arange(10) * u.m / u.s, np.ones(10) * 2 * u.km / u.s),
(np.arange(10) * u.m / u.s, np.ones((10, 10)) * 2 * u.m / u.h),
(np.arange(10).reshape(2, 5) * u.m / u.s, np.ones((2, 5)) * 3 * u.km / u.h),
(
np.arange(1000).reshape(20, 5, 10),
np.ones((20, 5, 10)) * 3 * u.dimensionless_unscaled,
),
(np.array(5), np.array(10) * u.s / u.h),
],
)
def test_arithmetics_data_unit_identical(data1, data2):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
# Addition
nd3 = nd1.add(nd2)
ref = data1 + data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd3.data)
assert nd3.unit == ref_unit
# Subtraction
nd4 = nd1.subtract(nd2)
ref = data1 - data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd4.data)
assert nd4.unit == ref_unit
# Multiplication
nd5 = nd1.multiply(nd2)
ref = data1 * data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd5.data)
assert nd5.unit == ref_unit
# Division
nd6 = nd1.divide(nd2)
ref = data1 / data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd6.data)
assert nd6.unit == ref_unit
for nd in [nd3, nd4, nd5, nd6]:
# Check that broadcasting worked as expected
if data1.ndim > data2.ndim:
assert data1.shape == nd.data.shape
else:
assert data2.shape == nd.data.shape
# Check all other attributes are not set
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Test with Data and unit and covers:
# not identical and not convertible units
# one with unit (which is not dimensionless) and one without
@pytest.mark.parametrize(
("data1", "data2"),
[
(np.array(5) * u.s, np.array(10) * u.m),
(np.array(5) * u.Mpc, np.array(10) * u.km / u.s),
(np.array(5) * u.Mpc, np.array(10)),
(np.array(5), np.array(10) * u.s),
],
)
def test_arithmetics_data_unit_not_identical(data1, data2):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
# Addition should not be possible
with pytest.raises(UnitsError):
nd1.add(nd2)
# Subtraction should not be possible
with pytest.raises(UnitsError):
nd1.subtract(nd2)
# Multiplication is possible
nd3 = nd1.multiply(nd2)
ref = data1 * data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd3.data)
assert nd3.unit == ref_unit
# Division is possible
nd4 = nd1.divide(nd2)
ref = data1 / data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd4.data)
assert nd4.unit == ref_unit
for nd in [nd3, nd4]:
# Check all other attributes are not set
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Tests with wcs (not very sensible because there is no operation between
# them), covering:
# both set and identical/not identical
# one set
# None set
@pytest.mark.parametrize(
("wcs1", "wcs2"),
[
(None, None),
(None, WCS(naxis=2)),
(WCS(naxis=2), None),
nd_testing.create_two_equal_wcs(naxis=2),
nd_testing.create_two_unequal_wcs(naxis=2),
],
)
def test_arithmetics_data_wcs(wcs1, wcs2):
nd1 = NDDataArithmetic(1, wcs=wcs1)
nd2 = NDDataArithmetic(1, wcs=wcs2)
if wcs1 is None and wcs2 is None:
ref_wcs = None
elif wcs1 is None:
ref_wcs = wcs2
elif wcs2 is None:
ref_wcs = wcs1
else:
ref_wcs = wcs1
# Addition
nd3 = nd1.add(nd2)
nd_testing.assert_wcs_seem_equal(ref_wcs, nd3.wcs)
# Subtraction
nd4 = nd1.subtract(nd2)
nd_testing.assert_wcs_seem_equal(ref_wcs, nd4.wcs)
# Multiplication
nd5 = nd1.multiply(nd2)
nd_testing.assert_wcs_seem_equal(ref_wcs, nd5.wcs)
# Division
nd6 = nd1.divide(nd2)
nd_testing.assert_wcs_seem_equal(ref_wcs, nd6.wcs)
for nd in [nd3, nd4, nd5, nd6]:
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert len(nd.meta) == 0
assert nd.mask is None
# Masks are handled completely separately from the data in the arithmetic
# mixin, so no correlation tests are needed. Covering:
# masks 1D, 2D and mixed cases with broadcasting
@pytest.mark.parametrize(
("mask1", "mask2"),
[
(None, None),
(None, False),
(True, None),
(False, False),
(True, False),
(False, True),
(True, True),
(np.array(False), np.array(True)),
(np.array(False), np.array([0, 1, 0, 1, 1], dtype=np.bool_)),
(np.array(True), np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_)),
(
np.array([0, 1, 0, 1, 1], dtype=np.bool_),
np.array([1, 1, 0, 0, 1], dtype=np.bool_),
),
(
np.array([0, 1, 0, 1, 1], dtype=np.bool_),
np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_),
),
(
np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_),
np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_),
),
],
)
def test_arithmetics_data_masks(mask1, mask2):
nd1 = NDDataArithmetic(1, mask=mask1)
nd2 = NDDataArithmetic(1, mask=mask2)
if mask1 is None and mask2 is None:
ref_mask = None
elif mask1 is None:
ref_mask = mask2
elif mask2 is None:
ref_mask = mask1
else:
ref_mask = mask1 | mask2
# Addition
nd3 = nd1.add(nd2)
assert_array_equal(ref_mask, nd3.mask)
# Subtraction
nd4 = nd1.subtract(nd2)
assert_array_equal(ref_mask, nd4.mask)
# Multiplication
nd5 = nd1.multiply(nd2)
assert_array_equal(ref_mask, nd5.mask)
# Division
nd6 = nd1.divide(nd2)
assert_array_equal(ref_mask, nd6.mask)
for nd in [nd3, nd4, nd5, nd6]:
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert len(nd.meta) == 0
assert nd.wcs is None
# One additional case which cannot easily be incorporated in the test above:
# what happens if the masks are numpy ndarrays that are not broadcastable
def test_arithmetics_data_masks_invalid():
nd1 = NDDataArithmetic(1, mask=np.array([1, 0], dtype=np.bool_))
nd2 = NDDataArithmetic(1, mask=np.array([1, 0, 1], dtype=np.bool_))
with pytest.raises(ValueError):
nd1.add(nd2)
with pytest.raises(ValueError):
nd1.multiply(nd2)
with pytest.raises(ValueError):
nd1.subtract(nd2)
with pytest.raises(ValueError):
nd1.divide(nd2)
# Covering:
# both have uncertainties (data and uncertainty without unit)
# tested against manually determined resulting uncertainties to verify the
# implemented formulas
# this test only works as long as data1 and data2 do not contain any 0
def test_arithmetics_stddevuncertainty_basic():
nd1 = NDDataArithmetic([1, 2, 3], uncertainty=StdDevUncertainty([1, 1, 3]))
nd2 = NDDataArithmetic([2, 2, 2], uncertainty=StdDevUncertainty([2, 2, 2]))
nd3 = nd1.add(nd2)
nd4 = nd2.add(nd1)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(np.array([1, 1, 3]) ** 2 + np.array([2, 2, 2]) ** 2)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2)
nd4 = nd2.subtract(nd1)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty (same as for add)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2)
nd4 = nd2.multiply(nd1)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.abs(np.array([2, 4, 6])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
+ (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2)
nd4 = nd2.divide(nd1)
# Inverse operation gives a different uncertainty!
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = np.abs(np.array([1 / 2, 2 / 2, 3 / 2])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
+ (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = np.abs(np.array([2, 1, 2 / 3])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
+ (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
)
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
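# For reference (an added summary, not part of the original comments): the
# expected values above and in the correlated tests below follow standard
# first-order error propagation.  With standard deviations sigma_a, sigma_b and
# correlation rho, the propagated standard deviation is
#
#     add/subtract: sqrt(sigma_a**2 + sigma_b**2 +/- 2*rho*|sigma_a*sigma_b|)
#     multiply:     |a*b| * sqrt((sigma_a/a)**2 + (sigma_b/b)**2
#                                + 2*rho*|sigma_a*sigma_b| / (a*b))
#     divide:       |a/b| * sqrt((sigma_a/a)**2 + (sigma_b/b)**2
#                                - 2*rho*|sigma_a*sigma_b| / (a*b))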
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.parametrize(
("cor", "uncert1", "data2"),
[
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
],
)
def test_arithmetics_stddevuncertainty_basic_with_correlation(cor, uncert1, data2):
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = np.array(uncert1)
uncert2 = np.array([2, 2, 2])
nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertainty(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(
uncert1**2 + uncert2**2 + 2 * cor * np.abs(uncert1 * uncert2)
)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(
uncert1**2 + uncert2**2 - 2 * cor * np.abs(uncert1 * uncert2)
)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = (np.abs(data1 * data2)) * np.sqrt(
(uncert1 / data1) ** 2
+ (uncert2 / data2) ** 2
+ (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty!
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = (np.abs(data1 / data2)) * np.sqrt(
(uncert1 / data1) ** 2
+ (uncert2 / data2) ** 2
- (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = (np.abs(data2 / data1)) * np.sqrt(
(uncert1 / data1) ** 2
+ (uncert2 / data2) ** 2
- (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.parametrize(
("cor", "uncert1", "data2"),
[
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
],
)
def test_arithmetics_varianceuncertainty_basic_with_correlation(cor, uncert1, data2):
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = np.array(uncert1) ** 2
uncert2 = np.array([2, 2, 2]) ** 2
nd1 = NDDataArithmetic(data1, uncertainty=VarianceUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=VarianceUncertainty(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = uncert1 + uncert2 + 2 * cor * np.sqrt(uncert1 * uncert2)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = uncert1 + uncert2 - 2 * cor * np.sqrt(uncert1 * uncert2)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = (data1 * data2) ** 2 * (
uncert1 / data1**2
+ uncert2 / data2**2
+ (2 * cor * np.sqrt(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty because of the
    # prefactor nd1/nd2 vs nd2/nd1. However, a large chunk is the same.
ref_common = (
uncert1 / data1**2
+ uncert2 / data2**2
- (2 * cor * np.sqrt(uncert1 * uncert2) / (data1 * data2))
)
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = (data1 / data2) ** 2 * ref_common
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = (data2 / data1) ** 2 * ref_common
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
("cor", "uncert1", "data2"),
[
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
],
)
def test_arithmetics_inversevarianceuncertainty_basic_with_correlation(
cor, uncert1, data2
):
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = 1 / np.array(uncert1) ** 2
uncert2 = 1 / np.array([2, 2, 2]) ** 2
nd1 = NDDataArithmetic(data1, uncertainty=InverseVariance(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=InverseVariance(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = 1 / (
1 / uncert1 + 1 / uncert2 + 2 * cor / np.sqrt(uncert1 * uncert2)
)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = 1 / (
1 / uncert1 + 1 / uncert2 - 2 * cor / np.sqrt(uncert1 * uncert2)
)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = 1 / (
(data1 * data2) ** 2
* (
1 / uncert1 / data1**2
+ 1 / uncert2 / data2**2
+ (2 * cor / np.sqrt(uncert1 * uncert2) / (data1 * data2))
)
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty because of the
    # prefactor nd1/nd2 vs nd2/nd1. However, a large chunk is the same.
ref_common = (
1 / uncert1 / data1**2
+ 1 / uncert2 / data2**2
- (2 * cor / np.sqrt(uncert1 * uncert2) / (data1 * data2))
)
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = 1 / ((data1 / data2) ** 2 * ref_common)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = 1 / ((data2 / data1) ** 2 * ref_common)
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Covering:
# just an example that a np.ndarray works as correlation, no checks for
# the right result since these were basically done in the function above.
def test_arithmetics_stddevuncertainty_basic_with_correlation_array():
data1 = np.array([1, 2, 3])
data2 = np.array([1, 1, 1])
uncert1 = np.array([1, 1, 1])
uncert2 = np.array([2, 2, 2])
cor = np.array([0, 0.25, 0])
nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertainty(uncert2))
nd1.add(nd2, uncertainty_correlation=cor)
# Covering:
# that propagation raises an exception when a correlation is given but the
# uncertainty does not support correlation.
def test_arithmetics_with_correlation_unsupported():
data1 = np.array([1, 2, 3])
data2 = np.array([1, 1, 1])
uncert1 = np.array([1, 1, 1])
uncert2 = np.array([2, 2, 2])
cor = 3
nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertaintyUncorrelated(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertaintyUncorrelated(uncert2))
with pytest.raises(ValueError):
nd1.add(nd2, uncertainty_correlation=cor)
# Covering:
# only one has an uncertainty (data and uncertainty without unit)
# tested against the case where the other one has zero uncertainty (that case
# must be correct because it was already tested above).
# Also verify that even if the resulting data has negative values the
# resulting uncertainty contains no negative values.
def test_arithmetics_stddevuncertainty_one_missing():
nd1 = NDDataArithmetic([1, -2, 3])
nd1_ref = NDDataArithmetic([1, -2, 3], uncertainty=StdDevUncertainty([0, 0, 0]))
nd2 = NDDataArithmetic([2, 2, -2], uncertainty=StdDevUncertainty([2, 2, 2]))
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2.add(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2.subtract(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2.multiply(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2.divide(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
("uncert1", "uncert2"),
[
(np.array([1, 2, 3]) * u.m, None),
(np.array([1, 2, 3]) * u.cm, None),
(None, np.array([1, 2, 3]) * u.m),
(None, np.array([1, 2, 3]) * u.cm),
(np.array([1, 2, 3]), np.array([2, 3, 4])),
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
],
)
def test_arithmetics_stddevuncertainty_with_units(uncert1, uncert2):
# Data has same units
data1 = np.array([1, 2, 3]) * u.m
data2 = np.array([-4, 7, 0]) * u.m
if uncert1 is not None:
uncert1 = StdDevUncertainty(uncert1)
if isinstance(uncert1, Quantity):
uncert1_ref = uncert1.to_value(data1.unit)
else:
uncert1_ref = uncert1
uncert_ref1 = StdDevUncertainty(uncert1_ref, copy=True)
else:
uncert1 = None
uncert_ref1 = None
if uncert2 is not None:
uncert2 = StdDevUncertainty(uncert2)
if isinstance(uncert2, Quantity):
uncert2_ref = uncert2.to_value(data2.unit)
else:
uncert2_ref = uncert2
uncert_ref2 = StdDevUncertainty(uncert2_ref, copy=True)
else:
uncert2 = None
uncert_ref2 = None
nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
# Let's start the tests
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2_ref.add(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2_ref.subtract(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2_ref.multiply(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2_ref.divide(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
("uncert1", "uncert2"),
[
(np.array([1, 2, 3]) * u.m, None),
(np.array([1, 2, 3]) * u.cm, None),
(None, np.array([1, 2, 3]) * u.m),
(None, np.array([1, 2, 3]) * u.cm),
(np.array([1, 2, 3]), np.array([2, 3, 4])),
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
],
)
def test_arithmetics_varianceuncertainty_with_units(uncert1, uncert2):
# Data has same units
data1 = np.array([1, 2, 3]) * u.m
data2 = np.array([-4, 7, 0]) * u.m
if uncert1 is not None:
uncert1 = VarianceUncertainty(uncert1**2)
if isinstance(uncert1, Quantity):
uncert1_ref = uncert1.to_value(data1.unit**2)
else:
uncert1_ref = uncert1
uncert_ref1 = VarianceUncertainty(uncert1_ref, copy=True)
else:
uncert1 = None
uncert_ref1 = None
if uncert2 is not None:
uncert2 = VarianceUncertainty(uncert2**2)
if isinstance(uncert2, Quantity):
uncert2_ref = uncert2.to_value(data2.unit**2)
else:
uncert2_ref = uncert2
uncert_ref2 = VarianceUncertainty(uncert2_ref, copy=True)
else:
uncert2 = None
uncert_ref2 = None
nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
# Let's start the tests
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2_ref.add(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2_ref.subtract(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2_ref.multiply(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2_ref.divide(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
("uncert1", "uncert2"),
[
(np.array([1, 2, 3]) * u.m, None),
(np.array([1, 2, 3]) * u.cm, None),
(None, np.array([1, 2, 3]) * u.m),
(None, np.array([1, 2, 3]) * u.cm),
(np.array([1, 2, 3]), np.array([2, 3, 4])),
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
],
)
def test_arithmetics_inversevarianceuncertainty_with_units(uncert1, uncert2):
# Data has same units
data1 = np.array([1, 2, 3]) * u.m
data2 = np.array([-4, 7, 0]) * u.m
if uncert1 is not None:
uncert1 = InverseVariance(1 / uncert1**2)
if isinstance(uncert1, Quantity):
uncert1_ref = uncert1.to_value(1 / data1.unit**2)
else:
uncert1_ref = uncert1
uncert_ref1 = InverseVariance(uncert1_ref, copy=True)
else:
uncert1 = None
uncert_ref1 = None
if uncert2 is not None:
uncert2 = InverseVariance(1 / uncert2**2)
if isinstance(uncert2, Quantity):
uncert2_ref = uncert2.to_value(1 / data2.unit**2)
else:
uncert2_ref = uncert2
uncert_ref2 = InverseVariance(uncert2_ref, copy=True)
else:
uncert2 = None
uncert_ref2 = None
nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
# Let's start the tests
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2_ref.add(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2_ref.subtract(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2_ref.multiply(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2_ref.divide(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Test abbreviation and long name for taking the first found meta, mask, wcs
@pytest.mark.parametrize("use_abbreviation", ["ff", "first_found"])
def test_arithmetics_handle_switches(use_abbreviation):
meta1 = {"a": 1}
meta2 = {"b": 2}
mask1 = True
mask2 = False
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
wcs1, wcs2 = nd_testing.create_two_unequal_wcs(naxis=1)
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(
data1, meta=meta1, mask=mask1, wcs=wcs1, uncertainty=uncertainty1
)
nd2 = NDDataArithmetic(
data2, meta=meta2, mask=mask2, wcs=wcs2, uncertainty=uncertainty2
)
nd3 = NDDataArithmetic(data1)
# Both have the attributes but option None is chosen
nd_ = nd1.add(
nd2,
propagate_uncertainties=None,
handle_meta=None,
handle_mask=None,
compare_wcs=None,
)
assert nd_.wcs is None
assert len(nd_.meta) == 0
assert nd_.mask is None
assert nd_.uncertainty is None
# Only second has attributes and False is chosen
nd_ = nd3.add(
nd2,
propagate_uncertainties=False,
handle_meta=use_abbreviation,
handle_mask=use_abbreviation,
compare_wcs=use_abbreviation,
)
nd_testing.assert_wcs_seem_equal(nd_.wcs, wcs2)
assert nd_.meta == meta2
assert nd_.mask == mask2
assert_array_equal(nd_.uncertainty.array, uncertainty2.array)
# Only first has attributes and False is chosen
nd_ = nd1.add(
nd3,
propagate_uncertainties=False,
handle_meta=use_abbreviation,
handle_mask=use_abbreviation,
compare_wcs=use_abbreviation,
)
nd_testing.assert_wcs_seem_equal(nd_.wcs, wcs1)
assert nd_.meta == meta1
assert nd_.mask == mask1
assert_array_equal(nd_.uncertainty.array, uncertainty1.array)
def test_arithmetics_meta_func():
def meta_fun_func(meta1, meta2, take="first"):
if take == "first":
return meta1
else:
return meta2
meta1 = {"a": 1}
meta2 = {"a": 3, "b": 2}
mask1 = True
mask2 = False
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, uncertainty=uncertainty1)
nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, uncertainty=uncertainty2)
nd3 = nd1.add(nd2, handle_meta=meta_fun_func)
assert nd3.meta["a"] == 1
assert "b" not in nd3.meta
nd4 = nd1.add(nd2, handle_meta=meta_fun_func, meta_take="second")
assert nd4.meta["a"] == 3
assert nd4.meta["b"] == 2
with pytest.raises(KeyError):
nd1.add(nd2, handle_meta=meta_fun_func, take="second")
def test_arithmetics_wcs_func():
def wcs_comp_func(wcs1, wcs2, tolerance=0.1):
if tolerance < 0.01:
return False
return True
meta1 = {"a": 1}
meta2 = {"a": 3, "b": 2}
mask1 = True
mask2 = False
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=1)
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(
data1, meta=meta1, mask=mask1, wcs=wcs1, uncertainty=uncertainty1
)
nd2 = NDDataArithmetic(
data2, meta=meta2, mask=mask2, wcs=wcs2, uncertainty=uncertainty2
)
nd3 = nd1.add(nd2, compare_wcs=wcs_comp_func)
nd_testing.assert_wcs_seem_equal(nd3.wcs, wcs1)
# Fails because the function fails
with pytest.raises(ValueError):
nd1.add(nd2, compare_wcs=wcs_comp_func, wcs_tolerance=0.00001)
# Fails because for a parameter to be passed correctly to the function it
# needs the wcs_ prefix
with pytest.raises(KeyError):
nd1.add(nd2, compare_wcs=wcs_comp_func, tolerance=1)
def test_arithmetics_mask_func():
def mask_sad_func(mask1, mask2, fun=0):
if fun > 0.5:
return mask2
else:
return mask1
meta1 = {"a": 1}
meta2 = {"a": 3, "b": 2}
mask1 = [True, False, True]
mask2 = [True, False, False]
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, uncertainty=uncertainty1)
nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, uncertainty=uncertainty2)
nd3 = nd1.add(nd2, handle_mask=mask_sad_func)
assert_array_equal(nd3.mask, nd1.mask)
nd4 = nd1.add(nd2, handle_mask=mask_sad_func, mask_fun=1)
assert_array_equal(nd4.mask, nd2.mask)
with pytest.raises(KeyError):
nd1.add(nd2, handle_mask=mask_sad_func, fun=1)
@pytest.mark.parametrize("meth", ["add", "subtract", "divide", "multiply"])
def test_two_argument_useage(meth):
ndd1 = NDDataArithmetic(np.ones((3, 3)))
ndd2 = NDDataArithmetic(np.ones((3, 3)))
# Call add on the class (not the instance) and compare it with already
# tested usage:
ndd3 = getattr(NDDataArithmetic, meth)(ndd1, ndd2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
# And the same done on an unrelated instance...
ndd3 = getattr(NDDataArithmetic(-100), meth)(ndd1, ndd2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
@pytest.mark.parametrize("meth", ["add", "subtract", "divide", "multiply"])
def test_two_argument_useage_non_nddata_first_arg(meth):
data1 = 50
data2 = 100
# Call add on the class (not the instance)
ndd3 = getattr(NDDataArithmetic, meth)(data1, data2)
    # Compare it with the instance usage and two identical NDData-like
# classes:
ndd1 = NDDataArithmetic(data1)
ndd2 = NDDataArithmetic(data2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
# and check it's also working when called on an instance
ndd3 = getattr(NDDataArithmetic(-100), meth)(data1, data2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
def test_arithmetics_unknown_uncertainties():
# Not giving any uncertainty class means it is saved as UnknownUncertainty
ndd1 = NDDataArithmetic(
np.ones((3, 3)), uncertainty=UnknownUncertainty(np.ones((3, 3)))
)
ndd2 = NDDataArithmetic(
np.ones((3, 3)), uncertainty=UnknownUncertainty(np.ones((3, 3)) * 2)
)
# There is no way to propagate uncertainties:
with pytest.raises(IncompatibleUncertaintiesException):
ndd1.add(ndd2)
# But it should be possible without propagation
ndd3 = ndd1.add(ndd2, propagate_uncertainties=False)
np.testing.assert_array_equal(ndd1.uncertainty.array, ndd3.uncertainty.array)
ndd4 = ndd1.add(ndd2, propagate_uncertainties=None)
assert ndd4.uncertainty is None
def test_psf_warning():
"""Test that math on objects with a psf warn."""
ndd1 = NDDataArithmetic(np.ones((3, 3)), psf=np.zeros(3))
ndd2 = NDDataArithmetic(np.ones((3, 3)), psf=None)
# no warning if both are None
ndd2.add(ndd2)
with pytest.warns(AstropyUserWarning, match="Not setting psf attribute during add"):
ndd1.add(ndd2)
with pytest.warns(AstropyUserWarning, match="Not setting psf attribute during add"):
ndd2.add(ndd1)
with pytest.warns(AstropyUserWarning, match="Not setting psf attribute during add"):
ndd1.add(ndd1)
|
0f581a4d580d8724a84c25ee8fd71bf2086559d272c04103c51ecde33d947192 | # Note that we test the main astropy.wcs.WCS class directly rather than testing
# the mix-in class on its own (since it's not functional without being used as
# a mix-in)
import warnings
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import (
FK5,
ICRS,
ITRS,
EarthLocation,
Galactic,
SkyCoord,
SpectralCoord,
)
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.units import Quantity
from astropy.units.core import UnitsWarning
from astropy.utils import iers
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.wcs._wcs import __version__ as wcsver
from astropy.wcs.wcs import WCS, FITSFixedWarning, NoConvergence, Sip
from astropy.wcs.wcsapi.fitswcs import VELOCITY_FRAMES, custom_ctype_to_ucd_mapping
###############################################################################
# The following example is the simplest WCS with default values
###############################################################################
WCS_EMPTY = WCS(naxis=1)
WCS_EMPTY.wcs.crpix = [1]
def test_empty():
wcs = WCS_EMPTY
# Low-level API
assert wcs.pixel_n_dim == 1
assert wcs.world_n_dim == 1
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == [None]
assert wcs.world_axis_units == [""]
assert wcs.pixel_axis_names == [""]
assert wcs.world_axis_names == [""]
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [("world", 0, "value")]
assert wcs.world_axis_object_classes["world"][0] is Quantity
assert wcs.world_axis_object_classes["world"][1] == ()
assert wcs.world_axis_object_classes["world"][2]["unit"] is u.one
assert_allclose(wcs.pixel_to_world_values(29), 29)
assert_allclose(wcs.array_index_to_world_values(29), 29)
assert np.ndim(wcs.pixel_to_world_values(29)) == 0
assert np.ndim(wcs.array_index_to_world_values(29)) == 0
assert_allclose(wcs.world_to_pixel_values(29), 29)
assert_equal(wcs.world_to_array_index_values(29), (29,))
assert np.ndim(wcs.world_to_pixel_values(29)) == 0
assert np.ndim(wcs.world_to_array_index_values(29)) == 0
# High-level API
coord = wcs.pixel_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = wcs.array_index_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = 15 * u.one
x = wcs.world_to_pixel(coord)
assert_allclose(x, 15.0)
assert np.ndim(x) == 0
i = wcs.world_to_array_index(coord)
assert_equal(i, 15)
assert np.ndim(i) == 0
###############################################################################
# The following example is a simple 2D image with celestial coordinates
###############################################################################
HEADER_SIMPLE_CELESTIAL = """
WCSAXES = 2
CTYPE1 = RA---TAN
CTYPE2 = DEC--TAN
CRVAL1 = 10
CRVAL2 = 20
CRPIX1 = 30
CRPIX2 = 40
CDELT1 = -0.1
CDELT2 = 0.1
CROTA2 = 0.
CUNIT1 = deg
CUNIT2 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring(HEADER_SIMPLE_CELESTIAL, sep="\n"))
def test_simple_celestial():
wcs = WCS_SIMPLE_CELESTIAL
# Low-level API
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ["pos.eq.ra", "pos.eq.dec"]
assert wcs.world_axis_units == ["deg", "deg"]
assert wcs.pixel_axis_names == ["", ""]
assert wcs.world_axis_names == ["", ""]
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [
("celestial", 0, "spherical.lon.degree"),
("celestial", 1, "spherical.lat.degree"),
]
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], ICRS)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20))
assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20))
assert_allclose(wcs.world_to_pixel_values(10, 20), (29.0, 39.0))
assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29))
# High-level API
coord = wcs.pixel_to_world(29, 39)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 10)
assert_allclose(coord.dec.deg, 20)
coord = wcs.array_index_to_world(39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 10)
assert_allclose(coord.dec.deg, 20)
coord = SkyCoord(10, 20, unit="deg", frame="icrs")
x, y = wcs.world_to_pixel(coord)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
i, j = wcs.world_to_array_index(coord)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that if the coordinates are passed in a different frame things still
# work properly
coord_galactic = coord.galactic
x, y = wcs.world_to_pixel(coord_galactic)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
i, j = wcs.world_to_array_index(coord_galactic)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that we can actually index the array
data = np.arange(3600).reshape((60, 60))
coord = SkyCoord(10, 20, unit="deg", frame="icrs")
index = wcs.world_to_array_index(coord)
assert_equal(data[index], 2369)
coord = SkyCoord([10, 12], [20, 22], unit="deg", frame="icrs")
index = wcs.world_to_array_index(coord)
assert_equal(data[index], [2369, 3550])
###############################################################################
# The following example is a spectral cube with axes in an unusual order
###############################################################################
HEADER_SPECTRAL_CUBE = """
WCSAXES = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CNAME1 = Latitude
CNAME2 = Frequency
CNAME3 = Longitude
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep="\n"))
def test_spectral_cube():
# Spectral cube with a weird axis ordering
wcs = WCS_SPECTRAL_CUBE
# Low-level API
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
# High-level API
coord, spec = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert_allclose(coord.l.deg, 25)
assert_allclose(coord.b.deg, 10)
assert isinstance(spec, SpectralCoord)
assert_allclose(spec.to_value(u.Hz), 20)
coord, spec = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert_allclose(coord.l.deg, 25)
assert_allclose(coord.b.deg, 10)
assert isinstance(spec, SpectralCoord)
assert_allclose(spec.to_value(u.Hz), 20)
coord = SkyCoord(25, 10, unit="deg", frame="galactic")
spec = 20 * u.Hz
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
x, y, z = wcs.world_to_pixel(coord, spec)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
assert_allclose(z, 44.0)
# Order of world coordinates shouldn't matter
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
x, y, z = wcs.world_to_pixel(spec, coord)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
assert_allclose(z, 44.0)
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
i, j, k = wcs.world_to_array_index(coord, spec)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
i, j, k = wcs.world_to_array_index(spec, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
HEADER_SPECTRAL_CUBE_NONALIGNED = (
HEADER_SPECTRAL_CUBE.strip()
+ "\n"
+ """
PC2_3 = -0.5
PC3_2 = +0.5
"""
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_SPECTRAL_CUBE_NONALIGNED = WCS(
Header.fromstring(HEADER_SPECTRAL_CUBE_NONALIGNED, sep="\n")
)
def test_spectral_cube_nonaligned():
# Make sure that correlation matrix gets adjusted if there are non-identity
# CD matrix terms.
wcs = WCS_SPECTRAL_CUBE_NONALIGNED
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[
[True, True, True],
[False, True, True],
[True, True, True],
],
)
# NOTE: we check world_axis_object_components and world_axis_object_classes
# again here because in the past this failed when non-aligned axes were
# present, so this serves as a regression test.
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
###############################################################################
# The following example is from Rots et al (2015), Table 5. It represents a
# cube with two spatial dimensions and one time dimension
###############################################################################
HEADER_TIME_CUBE = """
SIMPLE = T / Fits standard
BITPIX = -32 / Bits per pixel
NAXIS = 3 / Number of axes
NAXIS1 = 2048 / Axis length
NAXIS2 = 2048 / Axis length
NAXIS3 = 11 / Axis length
DATE = '2008-10-28T14:39:06' / Date FITS file was generated
OBJECT = '2008 TC3' / Name of the object observed
EXPTIME = 1.0011 / Integration time
MJD-OBS = 54746.02749237 / Obs start
DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date
TELESCOP= 'VISTA' / ESO Telescope Name
INSTRUME= 'VIRCAM' / Instrument used.
TIMESYS = 'UTC' / From Observatory Time System
TREFPOS = 'TOPOCENT' / Topocentric
MJDREF = 54746.0 / Time reference point in MJD
RADESYS = 'ICRS' / Not equinoctal
CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection
CRVAL2 = 2.01824372640628 / RA at ref pixel
CUNIT2 = 'deg' / Angles are degrees always
CRPIX2 = 2956.6 / Pixel coordinate at ref point
CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection
CRVAL1 = 14.8289418840003 / Dec at ref pixel
CUNIT1 = 'deg' / Angles are degrees always
CRPIX1 = -448.2 / Pixel coordinate at ref point
CTYPE3 = 'UTC' / linear time (UTC)
CRVAL3 = 2375.341 / Relative time of first frame
CUNIT3 = 's' / Time unit
CRPIX3 = 1.0 / Pixel coordinate at ref point
CTYPE3A = 'TT' / alternative linear time (TT)
CRVAL3A = 2440.525 / Relative time of first frame
CUNIT3A = 's' / Time unit
CRPIX3A = 1.0 / Pixel coordinate at ref point
OBSGEO-B= -24.6157 / [deg] Tel geodetic latitude (=North)+
OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+
OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid
CRDER3 = 0.0819 / random error in timings from fit
CSYER3 = 0.0100 / absolute time error
PC1_1 = 0.999999971570892 / WCS transform matrix element
PC1_2 = 0.000238449608932 / WCS transform matrix element
PC2_1 = -0.000621542859395 / WCS transform matrix element
PC2_2 = 0.999999806842218 / WCS transform matrix element
CDELT1 = -9.48575432499806E-5 / Axis scale at reference point
CDELT2 = 9.48683176211164E-5 / Axis scale at reference point
CDELT3 = 13.3629 / Axis scale at reference point
PV1_1 = 1. / ZPN linear term
PV1_3 = 42. / ZPN cubic term
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep="\n"))
def test_time_cube():
    # Cube with two celestial axes and one time axis (Rots et al. 2015, Table 5)
wcs = WCS_TIME_CUBE
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (11, 2048, 2048)
assert wcs.pixel_shape == (2048, 2048, 11)
assert wcs.world_axis_physical_types == ["pos.eq.dec", "pos.eq.ra", "time"]
assert wcs.world_axis_units == ["deg", "deg", "s"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["", "", ""]
assert_equal(
wcs.axis_correlation_matrix,
[[True, True, False], [True, True, False], [False, False, True]],
)
components = wcs.world_axis_object_components
assert components[0] == ("celestial", 1, "spherical.lat.degree")
assert components[1] == ("celestial", 0, "spherical.lon.degree")
assert components[2][:2] == ("time", 0)
assert callable(components[2][2])
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], ICRS)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert wcs.world_axis_object_classes["time"][0] is Time
assert wcs.world_axis_object_classes["time"][1] == ()
assert wcs.world_axis_object_classes["time"][2] == {}
assert callable(wcs.world_axis_object_classes["time"][3])
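    # The (0-based) reference pixel CRPIX - 1 on each axis should map exactly
    # back to the corresponding CRVAL values, as checked below.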
assert_allclose(
wcs.pixel_to_world_values(-449.2, 2955.6, 0),
(14.8289418840003, 2.01824372640628, 2375.341),
)
assert_allclose(
wcs.array_index_to_world_values(0, 2955.6, -449.2),
(14.8289418840003, 2.01824372640628, 2375.341),
)
assert_allclose(
wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341),
(-449.2, 2955.6, 0),
)
assert_equal(
wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341),
(0, 2956, -449),
)
# High-level API
coord, time = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 1.7323356692202325)
assert_allclose(coord.dec.deg, 14.783516054817797)
assert isinstance(time, Time)
assert_allclose(time.mjd, 54746.03429755324)
coord, time = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 1.7323356692202325)
assert_allclose(coord.dec.deg, 14.783516054817797)
assert isinstance(time, Time)
assert_allclose(time.mjd, 54746.03429755324)
x, y, z = wcs.world_to_pixel(coord, time)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
assert_allclose(z, 44.0)
# Order of world coordinates shouldn't matter
x, y, z = wcs.world_to_pixel(time, coord)
assert_allclose(x, 29.0)
assert_allclose(y, 39.0)
assert_allclose(z, 44.0)
i, j, k = wcs.world_to_array_index(coord, time)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
i, j, k = wcs.world_to_array_index(time, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
###############################################################################
# The following tests are to make sure that Time objects are constructed
# correctly for a variety of combinations of WCS keywords
###############################################################################
HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""
if Version(wcsver) >= Version("7.1"):
HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n"
@pytest.fixture
def header_time_1d():
return Header.fromstring(HEADER_TIME_1D, sep="\n")
def assert_time_at(header, position, jd1, jd2, scale, format):
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header)
time = wcs.pixel_to_world(position)
assert_allclose(time.jd1, jd1, rtol=1e-10)
assert_allclose(time.jd2, jd2, rtol=1e-10)
assert time.format == format
assert time.scale == scale
@pytest.mark.parametrize(
"scale", ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc", "local")
)
def test_time_1d_values(header_time_1d, scale):
# Check that Time objects are instantiated with the correct values,
# scales, and formats.
header_time_1d["CTYPE1"] = scale.upper()
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, "mjd")
def test_time_1d_values_gps(header_time_1d):
# Special treatment for GPS scale
header_time_1d["CTYPE1"] = "GPS"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, "tai", "mjd")
def test_time_1d_values_deprecated(header_time_1d):
# Deprecated (in FITS) scales
header_time_1d["CTYPE1"] = "TDT"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "tt", "mjd")
header_time_1d["CTYPE1"] = "IAT"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "tai", "mjd")
header_time_1d["CTYPE1"] = "GMT"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "utc", "mjd")
header_time_1d["CTYPE1"] = "ET"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "tt", "mjd")
def test_time_1d_values_time(header_time_1d):
header_time_1d["CTYPE1"] = "TIME"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "utc", "mjd")
header_time_1d["TIMESYS"] = "TAI"
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, "tai", "mjd")
@pytest.mark.remote_data
@pytest.mark.parametrize("scale", ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc"))
def test_time_1d_roundtrip(header_time_1d, scale):
# Check that coordinates round-trip
pixel_in = np.arange(3, 10)
header_time_1d["CTYPE1"] = scale.upper()
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header_time_1d)
# Simple test
time = wcs.pixel_to_world(pixel_in)
pixel_out = wcs.world_to_pixel(time)
assert_allclose(pixel_in, pixel_out)
# Test with an intermediate change to a different scale/format
time = wcs.pixel_to_world(pixel_in).tdb
time.format = "isot"
pixel_out = wcs.world_to_pixel(time)
assert_allclose(pixel_in, pixel_out)
def test_time_1d_high_precision(header_time_1d):
# Case where the MJDREF is split into two for high precision
del header_time_1d["MJDREF"]
header_time_1d["MJDREFI"] = 52000.0
header_time_1d["MJDREFF"] = 1e-11
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header_time_1d)
time = wcs.pixel_to_world(10)
# Here we have to use a very small rtol to really test that MJDREFF is
# taken into account
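    # (pixel 10 lies 5 + 2 * 10 = 25 s past the reference time of
    # MJD 52000 + 1e-11, i.e. JD 2452000.5 + 1e-11)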
assert_allclose(time.jd1, 2452001.0, rtol=1e-12)
assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13)
def test_time_1d_location_geodetic(header_time_1d):
# Make sure that the location is correctly returned (geodetic case)
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header_time_1d)
time = wcs.pixel_to_world(10)
lon, lat, alt = time.location.to_geodetic()
# FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976
# ellipsoid (https://github.com/astropy/astropy/issues/9420)
assert_allclose(lon.degree, -20)
assert_allclose(lat.degree, -70)
# assert_allclose(alt.to_value(u.m), 2530.)
@pytest.fixture
def header_time_1d_no_obs():
header = Header.fromstring(HEADER_TIME_1D, sep="\n")
del header["OBSGEO-L"]
del header["OBSGEO-B"]
del header["OBSGEO-H"]
return header
def test_time_1d_location_geocentric(header_time_1d_no_obs):
# Make sure that the location is correctly returned (geocentric case)
header = header_time_1d_no_obs
header["OBSGEO-X"] = 10
header["OBSGEO-Y"] = -20
header["OBSGEO-Z"] = 30
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header)
time = wcs.pixel_to_world(10)
x, y, z = time.location.to_geocentric()
assert_allclose(x.to_value(u.m), 10)
assert_allclose(y.to_value(u.m), -20)
assert_allclose(z.to_value(u.m), 30)
def test_time_1d_location_geocenter(header_time_1d_no_obs):
header_time_1d_no_obs["TREFPOS"] = "GEOCENTER"
wcs = WCS(header_time_1d_no_obs)
time = wcs.pixel_to_world(10)
x, y, z = time.location.to_geocentric()
assert_allclose(x.to_value(u.m), 0)
assert_allclose(y.to_value(u.m), 0)
assert_allclose(z.to_value(u.m), 0)
def test_time_1d_location_missing(header_time_1d_no_obs):
# Check what happens when no location is present
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(
UserWarning,
match=(
"Missing or incomplete observer location "
"information, setting location in Time to None"
),
):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_location_incomplete(header_time_1d_no_obs):
# Check what happens when location information is incomplete
header_time_1d_no_obs["OBSGEO-L"] = 10.0
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(
UserWarning,
match=(
"Missing or incomplete observer location "
"information, setting location in Time to None"
),
):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_location_unsupported(header_time_1d_no_obs):
# Check what happens when TREFPOS is unsupported
header_time_1d_no_obs["TREFPOS"] = "BARYCENTER"
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(
UserWarning,
match=(
"Observation location 'barycenter' is not "
"supported, setting location in Time to None"
),
):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_unsupported_ctype(header_time_1d_no_obs):
    # For cases that we don't support yet, e.g. UT(...), use Time and drop the sub-scale
header_time_1d_no_obs["CTYPE1"] = "UT(WWV)"
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(
UserWarning, match="Dropping unsupported sub-scale WWV from scale UT"
):
time = wcs.pixel_to_world(10)
assert isinstance(time, Time)
###############################################################################
# Extra corner cases
###############################################################################
def test_unrecognized_unit():
# TODO: Determine whether the following behavior is desirable
wcs = WCS(naxis=1)
with pytest.warns(UnitsWarning):
wcs.wcs.cunit = ["bananas // sekonds"]
assert wcs.world_axis_units == ["bananas // sekonds"]
def test_distortion_correlations():
filename = get_pkg_data_filename("../../tests/data/sip.fits")
with pytest.warns(FITSFixedWarning):
w = WCS(filename)
assert_equal(w.axis_correlation_matrix, True)
# Changing PC to an identity matrix doesn't change anything since
# distortions are still present.
w.wcs.pc = [[1, 0], [0, 1]]
assert_equal(w.axis_correlation_matrix, True)
# Nor does changing the name of the axes to make them non-celestial
w.wcs.ctype = ["X", "Y"]
assert_equal(w.axis_correlation_matrix, True)
# However once we turn off the distortions the matrix changes
w.sip = None
assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]])
# If we go back to celestial coordinates then the matrix is all True again
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
assert_equal(w.axis_correlation_matrix, True)
# Or if we change to X/Y but have a non-identity PC
w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]]
w.wcs.ctype = ["X", "Y"]
assert_equal(w.axis_correlation_matrix, True)
def test_custom_ctype_to_ucd_mappings():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["SPAM"]
assert wcs.world_axis_physical_types == [None]
# Check simple behavior
with custom_ctype_to_ucd_mapping({"APPLE": "food.fruit"}):
assert wcs.world_axis_physical_types == [None]
with custom_ctype_to_ucd_mapping({"APPLE": "food.fruit", "SPAM": "food.spam"}):
assert wcs.world_axis_physical_types == ["food.spam"]
# Check nesting
with custom_ctype_to_ucd_mapping({"SPAM": "food.spam"}):
with custom_ctype_to_ucd_mapping({"APPLE": "food.fruit"}):
assert wcs.world_axis_physical_types == ["food.spam"]
with custom_ctype_to_ucd_mapping({"APPLE": "food.fruit"}):
with custom_ctype_to_ucd_mapping({"SPAM": "food.spam"}):
assert wcs.world_axis_physical_types == ["food.spam"]
# Check priority in nesting
with custom_ctype_to_ucd_mapping({"SPAM": "notfood"}):
with custom_ctype_to_ucd_mapping({"SPAM": "food.spam"}):
assert wcs.world_axis_physical_types == ["food.spam"]
with custom_ctype_to_ucd_mapping({"SPAM": "food.spam"}):
with custom_ctype_to_ucd_mapping({"SPAM": "notfood"}):
assert wcs.world_axis_physical_types == ["notfood"]
def test_caching_components_and_classes():
# Make sure that when we change the WCS object, the classes and components
# are updated (we use a cache internally, so we need to make sure the cache
# is invalidated if needed)
wcs = WCS_SIMPLE_CELESTIAL.deepcopy()
assert wcs.world_axis_object_components == [
("celestial", 0, "spherical.lon.degree"),
("celestial", 1, "spherical.lat.degree"),
]
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], ICRS)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
wcs.wcs.radesys = "FK5"
frame = wcs.world_axis_object_classes["celestial"][2]["frame"]
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2000.0
wcs.wcs.equinox = 2010
frame = wcs.world_axis_object_classes["celestial"][2]["frame"]
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2010.0
def test_sub_wcsapi_attributes():
# Regression test for a bug that caused some of the WCS attributes to be
# incorrect when using WCS.sub or WCS.celestial (which is an alias for sub
# with lon/lat types).
wcs = WCS_SPECTRAL_CUBE.deepcopy()
wcs.pixel_shape = (30, 40, 50)
wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
# Use celestial shortcut
wcs_sub1 = wcs.celestial
assert wcs_sub1.pixel_n_dim == 2
assert wcs_sub1.world_n_dim == 2
assert wcs_sub1.array_shape == (50, 30)
assert wcs_sub1.pixel_shape == (30, 50)
assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)]
assert wcs_sub1.world_axis_physical_types == [
"pos.galactic.lat",
"pos.galactic.lon",
]
assert wcs_sub1.world_axis_units == ["deg", "deg"]
assert wcs_sub1.world_axis_names == ["Latitude", "Longitude"]
# Try adding axes
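    # (passing 0 in the axes list to sub() inserts a new default axis, which is
    # why the derived shapes below contain None entries)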
wcs_sub2 = wcs.sub([0, 2, 0])
assert wcs_sub2.pixel_n_dim == 3
assert wcs_sub2.world_n_dim == 3
assert wcs_sub2.array_shape == (None, 40, None)
assert wcs_sub2.pixel_shape == (None, 40, None)
assert wcs_sub2.pixel_bounds == [None, (-2, 18), None]
assert wcs_sub2.world_axis_physical_types == [None, "em.freq", None]
assert wcs_sub2.world_axis_units == ["", "Hz", ""]
assert wcs_sub2.world_axis_names == ["", "Frequency", ""]
# Use strings
wcs_sub3 = wcs.sub(["longitude", "latitude"])
assert wcs_sub3.pixel_n_dim == 2
assert wcs_sub3.world_n_dim == 2
assert wcs_sub3.array_shape == (30, 50)
assert wcs_sub3.pixel_shape == (50, 30)
assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)]
assert wcs_sub3.world_axis_physical_types == [
"pos.galactic.lon",
"pos.galactic.lat",
]
assert wcs_sub3.world_axis_units == ["deg", "deg"]
assert wcs_sub3.world_axis_names == ["Longitude", "Latitude"]
# Now try without CNAME set
wcs.wcs.cname = [""] * wcs.wcs.naxis
wcs_sub4 = wcs.sub(["longitude", "latitude"])
assert wcs_sub4.pixel_n_dim == 2
assert wcs_sub4.world_n_dim == 2
assert wcs_sub4.array_shape == (30, 50)
assert wcs_sub4.pixel_shape == (50, 30)
assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)]
assert wcs_sub4.world_axis_physical_types == [
"pos.galactic.lon",
"pos.galactic.lat",
]
assert wcs_sub4.world_axis_units == ["deg", "deg"]
assert wcs_sub4.world_axis_names == ["", ""]
HEADER_POLARIZED = """
CTYPE1 = 'HPLT-TAN'
CTYPE2 = 'HPLN-TAN'
CTYPE3 = 'STOKES'
"""
@pytest.fixture
def header_polarized():
return Header.fromstring(HEADER_POLARIZED, sep="\n")
def test_phys_type_polarization(header_polarized):
w = WCS(header_polarized)
assert w.world_axis_physical_types[2] == "phys.polarization.stokes"
###############################################################################
# Spectral transformations
###############################################################################
HEADER_SPECTRAL_FRAMES = """
BUNIT = 'Jy/beam'
EQUINOX = 2.000000000E+03
CTYPE1 = 'RA---SIN'
CRVAL1 = 2.60108333333E+02
CDELT1 = -2.777777845E-04
CRPIX1 = 1.0
CUNIT1 = 'deg'
CTYPE2 = 'DEC--SIN'
CRVAL2 = -9.75000000000E-01
CDELT2 = 2.777777845E-04
CRPIX2 = 1.0
CUNIT2 = 'deg'
CTYPE3 = 'FREQ'
CRVAL3 = 1.37835117405E+09
CDELT3 = 9.765625000E+04
CRPIX3 = 32.0
CUNIT3 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_frames():
return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep="\n")
def test_spectralcoord_frame(header_spectral_frames):
# This is a test to check the numerical results of transformations between
# different velocity frames. We simply make sure that the returned
# SpectralCoords are in the right frame but don't check the transformations
# since this is already done in test_spectralcoord_accuracy
# in astropy.coordinates.
with iers.conf.set_temp("auto_download", False):
obstime = Time("2009-05-04T04:44:23", scale="utc")
header = header_spectral_frames.copy()
header["MJD-OBS"] = obstime.mjd
header["CRVAL1"] = 16.33211
header["CRVAL2"] = -34.2221
header["OBSGEO-L"] = 144.2
header["OBSGEO-B"] = -20.2
header["OBSGEO-H"] = 0.0
# We start off with a WCS defined in topocentric frequency
with pytest.warns(FITSFixedWarning):
wcs_topo = WCS(header)
# We convert a single pixel coordinate to world coordinates and keep only
# the second high level object - a SpectralCoord:
sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1]
# We check that this is in topocentric frame with zero velocities
assert isinstance(sc_topo, SpectralCoord)
assert isinstance(sc_topo.observer, ITRS)
assert sc_topo.observer.obstime.isot == obstime.isot
assert_equal(sc_topo.observer.data.differentials["s"].d_xyz.value, 0)
observatory = (
EarthLocation.from_geodetic(144.2, -20.2)
.get_itrs(obstime=obstime)
.transform_to(ICRS())
)
assert (
observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km
)
for specsys, expected_frame in VELOCITY_FRAMES.items():
header["SPECSYS"] = specsys
with pytest.warns(FITSFixedWarning):
wcs = WCS(header)
sc = wcs.pixel_to_world(0, 0, 31)[1]
# Now transform to the expected velocity frame, which should leave
# the spectral coordinate unchanged
sc_check = sc.with_observer_stationary_relative_to(expected_frame)
assert_quantity_allclose(sc.quantity, sc_check.quantity)
@pytest.mark.parametrize(
("ctype3", "observer"),
product(["ZOPT", "BETA", "VELO", "VRAD", "VOPT"], [False, True]),
)
def test_different_ctypes(header_spectral_frames, ctype3, observer):
header = header_spectral_frames.copy()
header["CTYPE3"] = ctype3
header["CRVAL3"] = 0.1
header["CDELT3"] = 0.001
if ctype3[0] == "V":
header["CUNIT3"] = "m s-1"
else:
header["CUNIT3"] = ""
header["RESTWAV"] = 1.420405752e09
header["MJD-OBS"] = 55197
if observer:
header["OBSGEO-L"] = 144.2
header["OBSGEO-B"] = -20.2
header["OBSGEO-H"] = 0.0
header["SPECSYS"] = "BARYCENT"
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header)
skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31)
assert isinstance(spectralcoord, SpectralCoord)
if observer:
pix = wcs.world_to_pixel(skycoord, spectralcoord)
else:
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
pix = wcs.world_to_pixel(skycoord, spectralcoord)
assert_allclose(pix, [0, 0, 31], rtol=1e-6, atol=1e-9)
def test_non_convergence_warning():
"""Test case for issue #11446
Since we can't define a target accuracy when plotting a WCS `all_world2pix`
should not error but only warn when the default accuracy can't be reached.
"""
# define a minimal WCS where convergence fails for certain image positions
wcs = WCS(naxis=2)
crpix = [0, 0]
a = b = ap = bp = np.zeros((4, 4))
a[3, 0] = -1.20116753e-07
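    # Note that a, b, ap and bp are all the same ndarray here, so the SIP
    # coefficient set above is shared by the forward and inverse polynomials.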
test_pos_x = [1000, 1]
test_pos_y = [0, 2]
wcs.sip = Sip(a, b, ap, bp, crpix)
# first make sure the WCS works when using a low accuracy
expected = wcs.all_world2pix(test_pos_x, test_pos_y, 0, tolerance=1e-3)
# then check that it fails when using the default accuracy
with pytest.raises(NoConvergence):
wcs.all_world2pix(test_pos_x, test_pos_y, 0)
    # Finally, check that world_to_pixel_values raises a warning but returns
    # the same 'low accuracy' result
with pytest.warns(UserWarning):
assert_allclose(wcs.world_to_pixel_values(test_pos_x, test_pos_y), expected)
HEADER_SPECTRAL_1D = """
CTYPE1 = 'FREQ'
CRVAL1 = 1.37835117405E+09
CDELT1 = 9.765625000E+04
CRPIX1 = 32.0
CUNIT1 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_1d():
return Header.fromstring(HEADER_SPECTRAL_1D, sep="\n")
@pytest.mark.parametrize(
("ctype1", "observer"),
product(["ZOPT", "BETA", "VELO", "VRAD", "VOPT"], [False, True]),
)
def test_spectral_1d(header_spectral_1d, ctype1, observer):
# This is a regression test for issues that happened with 1-d WCS
# where the target is not defined but observer is.
header = header_spectral_1d.copy()
header["CTYPE1"] = ctype1
header["CRVAL1"] = 0.1
header["CDELT1"] = 0.001
if ctype1[0] == "V":
header["CUNIT1"] = "m s-1"
else:
header["CUNIT1"] = ""
header["RESTWAV"] = 1.420405752e09
header["MJD-OBS"] = 55197
if observer:
header["OBSGEO-L"] = 144.2
header["OBSGEO-B"] = -20.2
header["OBSGEO-H"] = 0.0
header["SPECSYS"] = "BARYCENT"
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
wcs = WCS(header)
# First ensure that transformations round-trip
spectralcoord = wcs.pixel_to_world(31)
assert isinstance(spectralcoord, SpectralCoord)
assert spectralcoord.target is None
assert (spectralcoord.observer is not None) is observer
if observer:
expected_message = "No target defined on SpectralCoord"
else:
expected_message = "No observer defined on WCS"
with pytest.warns(AstropyUserWarning, match=expected_message):
pix = wcs.world_to_pixel(spectralcoord)
assert_allclose(pix, [31], rtol=1e-6)
# Also make sure that we can convert a SpectralCoord on which the observer
# is not defined but the target is.
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spectralcoord_no_obs = SpectralCoord(
spectralcoord.quantity,
doppler_rest=spectralcoord.doppler_rest,
doppler_convention=spectralcoord.doppler_convention,
target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc),
)
if observer:
expected_message = "No observer defined on SpectralCoord"
else:
expected_message = "No observer defined on WCS"
with pytest.warns(AstropyUserWarning, match=expected_message):
pix2 = wcs.world_to_pixel(spectralcoord_no_obs)
assert_allclose(pix2, [31], rtol=1e-6)
# And finally check case when both observer and target are defined on the
# SpectralCoord
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spectralcoord_no_obs = SpectralCoord(
spectralcoord.quantity,
doppler_rest=spectralcoord.doppler_rest,
doppler_convention=spectralcoord.doppler_convention,
observer=ICRS(10 * u.deg, 20 * u.deg, distance=0 * u.kpc),
target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc),
)
if observer:
pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
else:
with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
assert_allclose(pix3, [31], rtol=1e-6)
HEADER_SPECTRAL_WITH_TIME = """
WCSAXES = 3
CTYPE1 = 'RA---TAN'
CTYPE2 = 'DEC--TAN'
CTYPE3 = 'WAVE'
CRVAL1 = 98.83153
CRVAL2 = -66.818
CRVAL3 = 6.4205
CRPIX1 = 21.
CRPIX2 = 22.
CRPIX3 = 1.
CDELT1 = 3.6111E-05
CDELT2 = 3.6111E-05
CDELT3 = 0.001
CUNIT1 = 'deg'
CUNIT2 = 'deg'
CUNIT3 = 'um'
MJD-AVG = 59045.41466
RADESYS = 'ICRS'
SPECSYS = 'BARYCENT'
TIMESYS = 'UTC'
"""
@pytest.fixture
def header_spectral_with_time():
return Header.fromstring(HEADER_SPECTRAL_WITH_TIME, sep="\n")
def test_spectral_with_time_kw(header_spectral_with_time):
    def check_wcs(w):
assert_allclose(w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval)
sky, spec = w.pixel_to_world(*w.wcs.crpix)
assert_allclose(
(sky.spherical.lon.degree, sky.spherical.lat.degree, spec.value),
w.wcs.crval,
rtol=1e-3,
)
# Check with MJD-AVG and TIMESYS
hdr = header_spectral_with_time.copy()
with warnings.catch_warnings():
warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
w = WCS(hdr)
# Make sure the correct keyword is used in a test
assert ~np.isnan(w.wcs.mjdavg)
assert np.isnan(w.wcs.mjdobs)
check_wcs(w)
# Check fall back to MJD-OBS
hdr["MJD-OBS"] = hdr["MJD-AVG"]
del hdr["MJD-AVG"]
with warnings.catch_warnings():
warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
w = WCS(hdr)
# Make sure the correct keyword is used in a test
assert ~np.isnan(w.wcs.mjdobs)
assert np.isnan(w.wcs.mjdavg)
check_wcs(w)
    # Check fall back to DATE-OBS
hdr["DATE-OBS"] = "2020-07-15"
del hdr["MJD-OBS"]
with warnings.catch_warnings():
warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
w = WCS(hdr)
w.wcs.mjdobs = np.nan
# Make sure the correct keyword is used in a test
assert np.isnan(w.wcs.mjdobs)
assert np.isnan(w.wcs.mjdavg)
assert w.wcs.dateobs != ""
    check_wcs(w)
    # Check fall back to scale='utc'
    del hdr["TIMESYS"]
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
        w = WCS(hdr)
    check_wcs(w)
|
178c676e333c1597848d8400958fdaa3356cf56f5b59ad42562362c936cfceb2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools
import operator
from datetime import timedelta
from decimal import Decimal
import numpy as np
import pytest
from astropy import units as u
from astropy.table import Table
from astropy.time import (
STANDARD_TIME_SCALES,
TIME_DELTA_SCALES,
TIME_SCALES,
OperandTypeError,
ScaleValueError,
Time,
TimeDelta,
TimeDeltaMissingUnitWarning,
)
from astropy.utils import iers
allclose_jd = functools.partial(np.allclose, rtol=2.0**-52, atol=0)
allclose_jd2 = functools.partial(
np.allclose, rtol=2.0**-52, atol=2.0**-52
) # 20 ps atol
allclose_sec = functools.partial(
np.allclose, rtol=2.0**-52, atol=2.0**-52 * 24 * 3600
) # 20 ps atol
orig_auto_download = iers.conf.auto_download
def setup_module(module):
"""Use offline IERS table only."""
iers.conf.auto_download = False
def teardown_module(module):
"""Restore original setting."""
iers.conf.auto_download = orig_auto_download
class TestTimeDelta:
"""Test TimeDelta class"""
def setup_method(self):
self.t = Time("2010-01-01", scale="utc")
self.t2 = Time("2010-01-02 00:00:01", scale="utc")
self.t3 = Time(
"2010-01-03 01:02:03",
scale="utc",
precision=9,
in_subfmt="date_hms",
out_subfmt="date_hm",
location=(-75.0 * u.degree, 30.0 * u.degree, 500 * u.m),
)
self.t4 = Time("2010-01-01", scale="local")
self.dt = TimeDelta(100.0, format="sec")
self.dt_array = TimeDelta(np.arange(100, 1000, 100), format="sec")
def test_sub(self):
# time - time
dt = self.t2 - self.t
assert repr(dt).startswith(
"<TimeDelta object: scale='tai' format='jd' value=1.00001157407"
)
assert allclose_jd(dt.jd, 86401.0 / 86400.0)
assert allclose_sec(dt.sec, 86401.0)
# time - delta_time
t = self.t2 - dt
assert t.iso == self.t.iso
# delta_time - delta_time
dt2 = dt - self.dt
assert allclose_sec(dt2.sec, 86301.0)
# delta_time - time
with pytest.raises(OperandTypeError):
dt - self.t
def test_add(self):
# time + time
with pytest.raises(OperandTypeError):
self.t2 + self.t
# time + delta_time
dt = self.t2 - self.t
t2 = self.t + dt
assert t2.iso == self.t2.iso
# delta_time + delta_time
dt2 = dt + self.dt
assert allclose_sec(dt2.sec, 86501.0)
# delta_time + time
dt = self.t2 - self.t
t2 = dt + self.t
assert t2.iso == self.t2.iso
def test_add_vector(self):
"""Check time arithmetic as well as properly keeping track of whether
a time is a scalar or a vector"""
t = Time(0.0, format="mjd", scale="tai")
t2 = Time([0.0, 1.0], format="mjd", scale="tai")
dt = TimeDelta(100.0, format="jd")
dt2 = TimeDelta([100.0, 200.0], format="jd")
out = t + dt
assert allclose_jd(out.mjd, 100.0)
assert out.isscalar
out = t + dt2
assert allclose_jd(out.mjd, [100.0, 200.0])
assert not out.isscalar
out = t2 + dt
assert allclose_jd(out.mjd, [100.0, 101.0])
assert not out.isscalar
out = dt + dt
assert allclose_jd(out.jd, 200.0)
assert out.isscalar
out = dt + dt2
assert allclose_jd(out.jd, [200.0, 300.0])
assert not out.isscalar
# Reverse the argument order
out = dt + t
assert allclose_jd(out.mjd, 100.0)
assert out.isscalar
out = dt2 + t
assert allclose_jd(out.mjd, [100.0, 200.0])
assert not out.isscalar
out = dt + t2
assert allclose_jd(out.mjd, [100.0, 101.0])
assert not out.isscalar
out = dt2 + dt
assert allclose_jd(out.jd, [200.0, 300.0])
assert not out.isscalar
def test_sub_vector(self):
"""Check time arithmetic as well as properly keeping track of whether
a time is a scalar or a vector"""
t = Time(0.0, format="mjd", scale="tai")
t2 = Time([0.0, 1.0], format="mjd", scale="tai")
dt = TimeDelta(100.0, format="jd")
dt2 = TimeDelta([100.0, 200.0], format="jd")
out = t - dt
assert allclose_jd(out.mjd, -100.0)
assert out.isscalar
out = t - dt2
assert allclose_jd(out.mjd, [-100.0, -200.0])
assert not out.isscalar
out = t2 - dt
assert allclose_jd(out.mjd, [-100.0, -99.0])
assert not out.isscalar
out = dt - dt
assert allclose_jd(out.jd, 0.0)
assert out.isscalar
out = dt - dt2
assert allclose_jd(out.jd, [0.0, -100.0])
assert not out.isscalar
@pytest.mark.parametrize(
"values", [(2455197.5, 2455198.5), ([2455197.5], [2455198.5])]
)
def test_copy_timedelta(self, values):
"""Test copying the values of a TimeDelta object by passing it into the
Time initializer.
"""
val1, val2 = values
t = Time(val1, format="jd", scale="utc")
t2 = Time(val2, format="jd", scale="utc")
dt = t2 - t
dt2 = TimeDelta(dt, copy=False)
assert np.all(dt.jd == dt2.jd)
assert dt._time.jd1 is dt2._time.jd1
assert dt._time.jd2 is dt2._time.jd2
dt2 = TimeDelta(dt, copy=True)
assert np.all(dt.jd == dt2.jd)
assert dt._time.jd1 is not dt2._time.jd1
assert dt._time.jd2 is not dt2._time.jd2
# Include initializers
dt2 = TimeDelta(dt, format="sec")
assert allclose_sec(dt2.value, 86400.0)
def test_neg_abs(self):
for dt in (self.dt, self.dt_array):
dt2 = -dt
assert np.all(dt2.jd == -dt.jd)
dt3 = abs(dt)
assert np.all(dt3.jd == dt.jd)
dt4 = abs(dt2)
assert np.all(dt4.jd == dt.jd)
def test_mul_div(self):
for dt in (self.dt, self.dt_array):
dt2 = dt + dt + dt
dt3 = 3.0 * dt
assert allclose_jd(dt2.jd, dt3.jd)
dt4 = dt3 / 3.0
assert allclose_jd(dt4.jd, dt.jd)
dt5 = self.dt * np.arange(3)
assert dt5[0].jd == 0.0
assert dt5[-1].jd == (self.dt + self.dt).jd
dt6 = self.dt * [0, 1, 2]
assert np.all(dt6.jd == dt5.jd)
with pytest.raises(OperandTypeError):
self.dt * self.t
with pytest.raises(TypeError):
self.dt * object()
def test_mean(self):
def is_consistent(time_delta: TimeDelta):
mean_expected = (
np.sum(time_delta.jd1) + np.sum(time_delta.jd2)
) / time_delta.size
mean_test = time_delta.mean().jd1 + time_delta.mean().jd2
return mean_test == mean_expected
assert is_consistent(self.dt)
assert is_consistent(self.dt_array)
def test_keep_properties(self):
# closes #1924 (partially)
dt = TimeDelta(1000.0, format="sec")
for t in (self.t, self.t3):
ta = t + dt
assert ta.location is t.location
assert ta.precision == t.precision
assert ta.in_subfmt == t.in_subfmt
assert ta.out_subfmt == t.out_subfmt
tr = dt + t
assert tr.location is t.location
assert tr.precision == t.precision
assert tr.in_subfmt == t.in_subfmt
assert tr.out_subfmt == t.out_subfmt
ts = t - dt
assert ts.location is t.location
assert ts.precision == t.precision
assert ts.in_subfmt == t.in_subfmt
assert ts.out_subfmt == t.out_subfmt
t_tdb = self.t.tdb
assert hasattr(t_tdb, "_delta_tdb_tt")
assert not hasattr(t_tdb, "_delta_ut1_utc")
t_tdb_ut1 = t_tdb.ut1
assert hasattr(t_tdb_ut1, "_delta_tdb_tt")
assert hasattr(t_tdb_ut1, "_delta_ut1_utc")
t_tdb_ut1_utc = t_tdb_ut1.utc
assert hasattr(t_tdb_ut1_utc, "_delta_tdb_tt")
assert hasattr(t_tdb_ut1_utc, "_delta_ut1_utc")
# adding or subtracting some time should remove the delta's
# since these are time-dependent and should be recalculated
for op in (operator.add, operator.sub):
t1 = op(t_tdb, dt)
assert not hasattr(t1, "_delta_tdb_tt")
assert not hasattr(t1, "_delta_ut1_utc")
t2 = op(t_tdb_ut1, dt)
assert not hasattr(t2, "_delta_tdb_tt")
assert not hasattr(t2, "_delta_ut1_utc")
t3 = op(t_tdb_ut1_utc, dt)
assert not hasattr(t3, "_delta_tdb_tt")
assert not hasattr(t3, "_delta_ut1_utc")
def test_set_format(self):
"""
Test basics of setting format attribute.
"""
dt = TimeDelta(86400.0, format="sec")
assert dt.value == 86400.0
assert dt.format == "sec"
dt.format = "jd"
assert dt.value == 1.0
assert dt.format == "jd"
dt.format = "datetime"
assert dt.value == timedelta(days=1)
assert dt.format == "datetime"
def test_from_non_float(self):
dt = TimeDelta("1.000000000000001", format="jd")
assert dt != TimeDelta(1.000000000000001, format="jd") # precision loss.
assert dt == TimeDelta(1, 0.000000000000001, format="jd")
dt2 = TimeDelta(Decimal("1.000000000000001"), format="jd")
assert dt2 == dt
def test_to_value(self):
dt = TimeDelta(86400.0, format="sec")
assert dt.to_value("jd") == 1.0
assert dt.to_value("jd", "str") == "1.0"
assert dt.to_value("sec", subfmt="str") == "86400.0"
with pytest.raises(
ValueError,
match="not one of the known formats.*failed to parse as a unit",
):
dt.to_value("julian")
with pytest.raises(TypeError, match="missing required format or unit"):
dt.to_value()
class TestTimeDeltaScales:
"""Test scale conversion for Time Delta.
Go through @taldcroft's list of expected behavior from #1932"""
def setup_method(self):
# pick a date that includes a leap second for better testing
self.iso_times = [
"2012-06-30 12:00:00",
"2012-06-30 23:59:59",
"2012-07-01 00:00:00",
"2012-07-01 12:00:00",
]
self.t = {
scale: Time(self.iso_times, scale=scale, precision=9)
for scale in TIME_SCALES
}
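        # self.dt holds, per scale, the offset of each time from the first one;
        # these intervals span the 2012-06-30 leap second.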
self.dt = {scale: self.t[scale] - self.t[scale][0] for scale in TIME_SCALES}
def test_delta_scales_definition(self):
for scale in list(TIME_DELTA_SCALES) + [None]:
TimeDelta([0.0, 1.0, 10.0], format="sec", scale=scale)
with pytest.raises(ScaleValueError):
TimeDelta([0.0, 1.0, 10.0], format="sec", scale="utc")
@pytest.mark.parametrize(
("scale1", "scale2"),
list(itertools.product(STANDARD_TIME_SCALES, STANDARD_TIME_SCALES)),
)
def test_standard_scales_for_time_minus_time(self, scale1, scale2):
"""T(X) - T2(Y) -- does T(X) - T2(Y).X and return dT(X)
and T(X) +/- dT(Y) -- does (in essence) (T(X).Y +/- dT(Y)).X
I.e., time differences of two times should have the scale of the
first time. The one exception is UTC, which returns TAI.
There are no standard timescales for which this does not work.
"""
t1 = self.t[scale1]
t2 = self.t[scale2]
dt = t1 - t2
if scale1 in TIME_DELTA_SCALES:
assert dt.scale == scale1
else:
assert scale1 == "utc"
assert dt.scale == "tai"
# now check with delta time; also check reversibility
t1_recover_t2_scale = t2 + dt
assert t1_recover_t2_scale.scale == scale2
t1_recover = getattr(t1_recover_t2_scale, scale1)
assert allclose_jd(t1_recover.jd, t1.jd)
t2_recover_t1_scale = t1 - dt
assert t2_recover_t1_scale.scale == scale1
t2_recover = getattr(t2_recover_t1_scale, scale2)
assert allclose_jd(t2_recover.jd, t2.jd)
def test_local_scales_for_time_minus_time(self):
"""T1(local) - T2(local) should return dT(local)
T1(local) +/- dT(local) or T1(local) +/- Quantity(time-like) should
also return T(local)
I.e. Tests that time differences of two local scale times should
return delta time with local timescale. Furthermore, checks that
arithmetic of T(local) with dT(None) or time-like quantity does work.
        Also tests that subtracting two Time objects, one with a local time
        scale and the other with a standard time scale, raises TypeError.
"""
t1 = self.t["local"]
t2 = Time("2010-01-01", scale="local")
dt = t1 - t2
assert dt.scale == "local"
# now check with delta time
t1_recover = t2 + dt
assert t1_recover.scale == "local"
assert allclose_jd(t1_recover.jd, t1.jd)
# check that dT(None) can be subtracted from T(local)
dt2 = TimeDelta([10.0], format="sec", scale=None)
t3 = t2 - dt2
assert t3.scale == t2.scale
# check that time quantity can be subtracted from T(local)
q = 10 * u.s
assert (t2 - q).value == (t2 - dt2).value
# Check that one cannot subtract/add times with a standard scale
# from a local one (or vice versa)
t1 = self.t["local"]
for scale in STANDARD_TIME_SCALES:
t2 = self.t[scale]
with pytest.raises(TypeError):
t1 - t2
with pytest.raises(TypeError):
t2 - t1
with pytest.raises(TypeError):
t2 - dt
with pytest.raises(TypeError):
t2 + dt
with pytest.raises(TypeError):
dt + t2
def test_scales_for_delta_minus_delta(self):
"""dT(X) +/- dT2(Y) -- Add/subtract JDs for dT(X) and dT(Y).X
I.e. this will succeed if dT(Y) can be converted to scale X.
Returns delta time in scale X
"""
# geocentric timescales
dt_tai = self.dt["tai"]
dt_tt = self.dt["tt"]
dt0 = dt_tai - dt_tt
assert dt0.scale == "tai"
        # tai and tt differ only by a constant offset, so the differences should agree
assert allclose_sec(dt0.sec, 0.0)
dt_tcg = self.dt["tcg"]
dt1 = dt_tai - dt_tcg
assert dt1.scale == "tai"
        # tai and tcg tick at different rates, so the differences differ
assert not allclose_sec(dt1.sec, 0.0)
t_tai_tcg = self.t["tai"].tcg
dt_tai_tcg = t_tai_tcg - t_tai_tcg[0]
dt2 = dt_tai - dt_tai_tcg
assert dt2.scale == "tai"
        # but if the tcg difference is calculated from tai times, it should round-trip
assert allclose_sec(dt2.sec, 0.0)
# check that if we put TCG first, we get a TCG scale back
dt3 = dt_tai_tcg - dt_tai
assert dt3.scale == "tcg"
assert allclose_sec(dt3.sec, 0.0)
for scale in "tdb", "tcb", "ut1":
with pytest.raises(TypeError):
dt_tai - self.dt[scale]
# barycentric timescales
dt_tcb = self.dt["tcb"]
dt_tdb = self.dt["tdb"]
dt4 = dt_tcb - dt_tdb
assert dt4.scale == "tcb"
        # tcb and tdb run at different rates, so the difference is not zero
        assert not allclose_sec(dt4.sec, 0.0)
t_tcb_tdb = self.t["tcb"].tdb
dt_tcb_tdb = t_tcb_tdb - t_tcb_tdb[0]
dt5 = dt_tcb - dt_tcb_tdb
assert dt5.scale == "tcb"
assert allclose_sec(dt5.sec, 0.0)
for scale in "utc", "tai", "tt", "tcg", "ut1":
with pytest.raises(TypeError):
dt_tcb - self.dt[scale]
# rotational timescale
dt_ut1 = self.dt["ut1"]
dt5 = dt_ut1 - dt_ut1[-1]
assert dt5.scale == "ut1"
assert dt5[-1].sec == 0.0
for scale in "utc", "tai", "tt", "tcg", "tcb", "tdb":
with pytest.raises(TypeError):
dt_ut1 - self.dt[scale]
# local time scale
dt_local = self.dt["local"]
dt6 = dt_local - dt_local[-1]
assert dt6.scale == "local"
assert dt6[-1].sec == 0.0
for scale in "utc", "tai", "tt", "tcg", "tcb", "tdb", "ut1":
with pytest.raises(TypeError):
dt_local - self.dt[scale]
@pytest.mark.parametrize(
("scale", "op"),
list(itertools.product(TIME_SCALES, (operator.add, operator.sub))),
)
def test_scales_for_delta_scale_is_none(self, scale, op):
"""T(X) +/- dT(None) or T(X) +/- Quantity(time-like)
This is always allowed and just adds JDs, i.e., the scale of
the TimeDelta or time-like Quantity will be taken to be X.
The one exception is again for X=UTC, where TAI is assumed instead,
so that a day is always defined as 86400 seconds.
"""
dt_none = TimeDelta([0.0, 1.0, -1.0, 1000.0], format="sec")
assert dt_none.scale is None
q_time = dt_none.to("s")
dt = self.dt[scale]
dt1 = op(dt, dt_none)
assert dt1.scale == dt.scale
assert allclose_jd(dt1.jd, op(dt.jd, dt_none.jd))
dt2 = op(dt_none, dt)
assert dt2.scale == dt.scale
assert allclose_jd(dt2.jd, op(dt_none.jd, dt.jd))
dt3 = op(q_time, dt)
assert dt3.scale == dt.scale
assert allclose_jd(dt3.jd, dt2.jd)
t = self.t[scale]
t1 = op(t, dt_none)
assert t1.scale == t.scale
assert allclose_jd(t1.jd, op(t.jd, dt_none.jd))
if op is operator.add:
t2 = op(dt_none, t)
assert t2.scale == t.scale
assert allclose_jd(t2.jd, t1.jd)
t3 = op(t, q_time)
assert t3.scale == t.scale
assert allclose_jd(t3.jd, t1.jd)
@pytest.mark.parametrize("scale", TIME_SCALES)
def test_delta_day_is_86400_seconds(self, scale):
"""TimeDelta or Quantity holding 1 day always means 24*60*60 seconds
This holds true for all timescales but UTC, for which leap-second
days are longer or shorter by one second.
"""
t = self.t[scale]
dt_day = TimeDelta(1.0, format="jd")
q_day = dt_day.to("day")
dt_day_leap = t[-1] - t[0]
# ^ = exclusive or, so either equal and not UTC, or not equal and UTC
assert allclose_jd(dt_day_leap.jd, dt_day.jd) ^ (scale == "utc")
t1 = t[0] + dt_day
assert allclose_jd(t1.jd, t[-1].jd) ^ (scale == "utc")
t2 = q_day + t[0]
assert allclose_jd(t2.jd, t[-1].jd) ^ (scale == "utc")
t3 = t[-1] - dt_day
assert allclose_jd(t3.jd, t[0].jd) ^ (scale == "utc")
t4 = t[-1] - q_day
assert allclose_jd(t4.jd, t[0].jd) ^ (scale == "utc")
def test_timedelta_setitem():
t = TimeDelta([1, 2, 3] * u.d, format="jd")
t[0] = 0.5
assert allclose_jd(t.value, [0.5, 2, 3])
t[1:] = 4.5
assert allclose_jd(t.value, [0.5, 4.5, 4.5])
t[:] = 86400 * u.s
assert allclose_jd(t.value, [1, 1, 1])
t[1] = TimeDelta(2, format="jd")
assert allclose_jd(t.value, [1, 2, 1])
with pytest.raises(ValueError) as err:
t[1] = 1 * u.m
assert "cannot convert value to a compatible TimeDelta" in str(err.value)
def test_timedelta_setitem_sec():
t = TimeDelta([1, 2, 3], format="sec")
t[0] = 0.5
assert allclose_jd(t.value, [0.5, 2, 3])
t[1:] = 4.5
assert allclose_jd(t.value, [0.5, 4.5, 4.5])
t[:] = 1 * u.day
assert allclose_jd(t.value, [86400, 86400, 86400])
t[1] = TimeDelta(2, format="jd")
assert allclose_jd(t.value, [86400, 86400 * 2, 86400])
with pytest.raises(ValueError) as err:
t[1] = 1 * u.m
assert "cannot convert value to a compatible TimeDelta" in str(err.value)
def test_timedelta_mask():
t = TimeDelta([1, 2] * u.d, format="jd")
t[1] = np.ma.masked
assert np.all(t.mask == [False, True])
assert allclose_jd(t[0].value, 1)
assert t.value[1] is np.ma.masked
def test_python_timedelta_scalar():
td = timedelta(days=1, seconds=1)
td1 = TimeDelta(td, format="datetime")
assert td1.sec == 86401.0
td2 = TimeDelta(86401.0, format="sec")
assert td2.datetime == td
def test_python_timedelta_vector():
td = [
[timedelta(days=1), timedelta(days=2)],
[timedelta(days=3), timedelta(days=4)],
]
td1 = TimeDelta(td, format="datetime")
assert np.all(td1.jd == [[1, 2], [3, 4]])
td2 = TimeDelta([[1, 2], [3, 4]], format="jd")
assert np.all(td2.datetime == td)
def test_timedelta_to_datetime():
td = TimeDelta(1, format="jd")
assert td.to_datetime() == timedelta(days=1)
td2 = TimeDelta([[1, 2], [3, 4]], format="jd")
td = [
[timedelta(days=1), timedelta(days=2)],
[timedelta(days=3), timedelta(days=4)],
]
assert np.all(td2.to_datetime() == td)
def test_insert_timedelta():
tm = TimeDelta([1, 2], format="sec")
    # Insert a length-2 TimeDelta at index 1
tm2 = tm.insert(1, TimeDelta([10, 20], format="sec"))
assert np.all(tm2 == TimeDelta([1, 10, 20, 2], format="sec"))
def test_no_units_warning():
with pytest.warns(TimeDeltaMissingUnitWarning):
delta = TimeDelta(1)
assert delta.to_value(u.day) == 1
with pytest.warns(TimeDeltaMissingUnitWarning):
table = Table({"t": [1, 2, 3]})
delta = TimeDelta(table["t"])
assert np.all(delta.to_value(u.day) == [1, 2, 3])
with pytest.warns(TimeDeltaMissingUnitWarning):
delta = TimeDelta(np.array([1, 2, 3]))
assert np.all(delta.to_value(u.day) == [1, 2, 3])
with pytest.warns(TimeDeltaMissingUnitWarning):
t = Time("2012-01-01") + 1
assert t.isot[:10] == "2012-01-02"
with pytest.warns(TimeDeltaMissingUnitWarning):
comp = TimeDelta([1, 2, 3], format="jd") >= 2
assert np.all(comp == [False, True, True])
with pytest.warns(TimeDeltaMissingUnitWarning):
# 2 is also interpreted as days, not seconds
assert (TimeDelta(5 * u.s) > 2) is False
# with unit is ok
assert TimeDelta(1 * u.s).to_value(u.s) == 1
# with format is also ok
assert TimeDelta(1, format="sec").to_value(u.s) == 1
assert TimeDelta(1, format="jd").to_value(u.day) == 1
# table column with units
table = Table({"t": [1, 2, 3] * u.s})
assert np.all(TimeDelta(table["t"]).to_value(u.s) == [1, 2, 3])
|
2ef0ff8c6990f73b8bb9e2ad71c2fc249aec506d2f89974c8c9e4a9d74b5b8f8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import datetime
import functools
import os
from copy import deepcopy
from decimal import Decimal, localcontext
from io import StringIO
import erfa
import numpy as np
import pytest
from erfa import ErfaWarning
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.table import Column, Table
from astropy.time import (
STANDARD_TIME_SCALES,
TIME_FORMATS,
ScaleValueError,
Time,
TimeDelta,
TimeString,
TimezoneInfo,
conf,
)
from astropy.utils import iers, isiterable
from astropy.utils.compat.optional_deps import HAS_H5PY, HAS_PYTZ
from astropy.utils.exceptions import AstropyDeprecationWarning
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps
) # 20 ps atol
allclose_sec = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps * 24 * 3600
)
allclose_year = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=0.0
) # 14 microsec at current epoch
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"]
t = Time(times, format="iso", scale="utc")
assert (
repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
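        # 0.123456789 s is 1.4288980208333335e-06 of a day, which is the
        # fractional-day offset expected in jd2 for the first time below.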
assert allclose_jd2(
t.jd2, np.array([-0.5 + 1.4288980208333335e-06, -0.50000000e00])
)
# Set scale to TAI
t = t.tai
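        # TAI-UTC was 32 s in 1999 and 34 s in 2010 (accumulated leap seconds),
        # hence the 32.123 s and 34.000 s offsets in the TAI representation.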
assert (
repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2,
np.array([-0.5 + 0.00037179926839122024, -0.5 + 0.00039351851851851852]),
)
# Get a new ``Time`` object which is referenced to the TT scale
# (internal JD1 and JD1 are now with respect to TT scale)"""
assert (
repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>"
)
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
# array, depending on whether the input was a scalar or array"""
assert allclose_sec(
t.cxcsec, np.array([31536064.307456788, 378691266.18400002])
)
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format="jd")
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000.0, 2450010.0)
t2 = Time(val, format="jd")
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.0
t3 = Time(val, val2, format="jd")
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.0) / 10.0).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format="jd")
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize("format_", Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == "tai"
@pytest.mark.parametrize("value", [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format="jd", scale="utc")
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format="iso", scale="tai", precision=1)
assert t2.value == "2010-01-01 00:00:34.0"
t2 = Time(t, format="iso", scale="tai", out_subfmt="date")
assert t2.value == "2010-01-01"
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format="mjd", scale="utc", location=("45d", "50d"))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format="mjd", scale="utc")
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(
mjd,
format="mjd",
scale="utc",
location=(np.arange(len(mjd)), np.arange(len(mjd))),
)
t5a = t4[3]
assert t5a.location == t4.location[3]
assert t5a.location.shape == ()
t5b = t4[3:4]
assert t5b.location.shape == (1,)
# Check that indexing a size-1 array returns a scalar location as well;
# see gh-10113.
t5c = t5b[0]
assert t5c.location.shape == ()
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0.0, 0.0, 0.0), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0.0, 0.999, 0.2)
t7 = Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=("45d", "50d"),
)
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=(np.arange(len(frac)), np.arange(len(frac))),
)
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == "2010-01-01 00:00:00.000"
assert t.tt.iso == "2010-01-01 00:01:06.184"
assert t.tai.fits == "2010-01-01T00:00:34.000"
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == "2010-01-01T00:01:06.910"
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
# Uses initial class-defined precision=3
assert t.iso == "2010-01-01 00:00:00.000"
# Set instance precision to 9
t.precision = 9
assert t.iso == "2010-01-01 00:00:00.000000000"
assert t.tai.utc.iso == "2010-01-01 00:00:00.000000000"
def test_precision_input(self):
"""Verifies that precision can only be 0-9 (inclusive). Any other
value should raise a ValueError exception."""
err_message = "precision attribute must be an int"
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc", precision=10)
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.precision = -1
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
precision=7,
location=(lon, lat),
)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == "2006-01-15 21:24:37.5000000"
assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
assert t.tai.iso == "2006-01-15 21:25:10.5000000"
assert t.tt.iso == "2006-01-15 21:25:42.6840000"
assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
assert t.tdb.iso == "2006-01-15 21:25:42.6843728"
assert t.tcb.iso == "2006-01-15 21:25:56.8939523"
def test_transforms_no_location(self):
"""Location should default to geocenter (relevant for TDB, TCB)."""
t = Time("2006-01-15 21:24:37.5", format="iso", scale="utc", precision=7)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == "2006-01-15 21:24:37.5000000"
assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
assert t.tai.iso == "2006-01-15 21:25:10.5000000"
assert t.tt.iso == "2006-01-15 21:25:42.6840000"
assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
assert t.tdb.iso == "2006-01-15 21:25:42.6843725"
assert t.tcb.iso == "2006-01-15 21:25:56.8939519"
# Check we get the same result
t2 = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
location=(0 * u.m, 0 * u.m, 0 * u.m),
)
assert t == t2
assert t.tdb == t2.tdb
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=location,
)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(location.x, location.y, location.z),
)
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"] * 2,
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert np.all(t.utc.iso == "2006-01-15 21:24:37.500000")
assert np.all(t.tdb.iso[0] == "2006-01-15 21:25:42.684373")
t2 = Time(
["2006-01-15 21:24:37.5"] * 2,
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
assert np.all(t2.utc.iso == "2006-01-15 21:24:37.500000")
assert t2.tdb.iso[0] == "2006-01-15 21:25:42.684373"
assert t2.tdb.iso[1] != "2006-01-15 21:25:42.684373"
with pytest.raises(ValueError): # 1 time, but two locations
Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
with pytest.raises(ValueError): # 3 times, but two locations
Time(
["2006-01-15 21:24:37.5"] * 3,
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
# multidimensional
mjd = np.arange(50000.0, 50008.0).reshape(4, 2)
t3 = Time(mjd, format="mjd", scale="utc", location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(
mjd,
format="mjd",
scale="utc",
location=(np.array([lon, 0]), np.array([lat, 0])),
)
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(
mjd,
format="mjd",
scale="utc",
location=(
np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]]),
),
)
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp("auto_download", False):
for scale1 in STANDARD_TIME_SCALES:
t1 = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale=scale1,
location=(lon, lat),
)
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = "local"
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format="decimalyear")
Time(100.0, format="cxcsec")
Time(100.0, format="unix")
Time(100.0, format="gps")
Time(1950.0, format="byear", scale="tai")
Time(2000.0, format="jyear", scale="tai")
Time("B1950.0", format="byear_str", scale="tai")
Time("J2000.0", format="jyear_str", scale="tai")
Time("2000-01-01 12:23:34.0", format="iso", scale="tai")
Time("2000-01-01 12:23:34.0Z", format="iso", scale="utc")
Time("2000-01-01T12:23:34.0", format="isot", scale="tai")
Time("2000-01-01T12:23:34.0Z", format="isot", scale="utc")
Time("2000-01-01T12:23:34.0", format="fits")
Time("2000-01-01T12:23:34.0", format="fits", scale="tdb")
Time(2400000.5, 51544.0333981, format="jd", scale="tai")
Time(0.0, 51544.0333981, format="mjd", scale="tai")
Time("2000:001:12:23:34.0", format="yday", scale="tai")
Time("2000:001:12:23:34.0Z", format="yday", scale="utc")
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format="datetime", scale="tai")
Time([dt, dt], format="datetime", scale="tai")
dt64 = np.datetime64("2012-06-18T02:00:05.453000000")
Time(dt64, format="datetime64", scale="tai")
Time([dt64, dt64], format="datetime64", scale="tai")
def test_local_format_transforms(self):
"""
        Test transformation of local time to different formats.
        Transformation to formats with a reference time should give
        ScaleValueError.
"""
t = Time("2006-01-15 21:24:37.5", scale="local")
assert_allclose(t.jd, 2453751.3921006946, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
assert_allclose(
t.decimalyear,
2006.0408002758752,
atol=0.001 / 3600.0 / 24.0 / 365.0,
rtol=0.0,
)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == "2006-01-15T21:24:37.500"
assert t.yday == "2006:015:21:24:37.500"
assert t.fits == "2006-01-15T21:24:37.500"
assert_allclose(
t.byear, 2006.04217888831, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
)
assert_allclose(
t.jyear, 2006.0407723496082, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
)
assert t.byear_str == "B2006.042"
assert t.jyear_str == "J2006.041"
# epochTimeFormats
with pytest.raises(ScaleValueError):
t.gps
with pytest.raises(ScaleValueError):
t.unix
with pytest.raises(ScaleValueError):
t.cxcsec
with pytest.raises(ScaleValueError):
t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale="utc", precision=9)
assert t.iso == "2000-01-02 03:04:05.123456000"
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale="utc")
assert t2.datetime == dt
t = Time([dt, dt2], scale="utc")
assert np.all(t.value == [dt, dt2])
t = Time("2000-01-01 01:01:01.123456789", scale="tai")
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2 - dt) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale="utc")
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64("2000-01-02T03:04:05.123456789")
dt64_2 = np.datetime64("2000-01-02")
t = Time(dt64, scale="utc", precision=9, format="datetime64")
assert t.iso == "2000-01-02 03:04:05.123456789"
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale="utc")
assert t2.datetime64 == dt64
t = Time(dt64_2, scale="utc", precision=3, format="datetime64")
assert t.iso == "2000-01-02 00:00:00.000"
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale="utc")
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale="utc", format="datetime64")
assert np.all(t.value == [dt64, dt64_2])
t = Time("2000-01-01 01:01:01.123456789", scale="tai")
assert t.datetime64 == np.datetime64("2000-01-01T01:01:01.123456789")
# broadcasting
dt3 = (dt64 + (dt64_2 - dt64) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale="utc", format="datetime64")
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format="datetime64")
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format="datetime64"))
assert Time(t3[2, 0], format="datetime64") == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format="jd", scale="tai", precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == "B2015.136594"
assert t.jyear_str == "J2015.134993"
t2 = Time(t.byear, format="byear", scale="tai")
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format="jyear", scale="tai")
assert allclose_jd(t2.jd, jd)
t = Time("J2015.134993", scale="tai", precision=6)
assert np.allclose(
t.jd, jd, rtol=1e-10, atol=0
) # J2015.134993 has 10 digit precision
assert t.byear_str == "B2015.136594"
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format="iso", scale="utc")
with pytest.raises(ValueError):
Time("2000:001", format="jd", scale="utc")
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ["bad"], format="mjd", scale="tai")
with pytest.raises(ValueError):
Time(50000.0, "bad", format="mjd", scale="tai")
with pytest.raises(ValueError):
Time("2005-08-04T00:01:02.000Z", scale="tai")
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format="jd", scale="utc")
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time("2000-01-02T03:04:05(TAI)", scale="utc")
with pytest.raises(ValueError):
Time("2000-01-02T03:04:05(TAI")
with pytest.raises(ValueError):
Time("2000-01-02T03:04:05(UT(NIST)")
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f"{year:04d}-{month:02d}"
yyyy_mm_dd = f"{year:04d}-{month:02d}-{day:02d}"
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + "-01 23:59:60.0", scale="utc")
assert t1.iso == yyyy_mm + "-02 00:00:00.000"
# Leap second is different
t1 = Time(yyyy_mm_dd + " 23:59:59.900", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:59.900"
t1 = Time(yyyy_mm_dd + " 23:59:60.000", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:60.000"
t1 = Time(yyyy_mm_dd + " 23:59:60.999", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:60.999"
if month == 6:
yyyy_mm_dd_plus1 = f"{year:04d}-07-01"
else:
yyyy_mm_dd_plus1 = f"{year + 1:04d}-01-01"
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + " 23:59:61.0", scale="utc")
assert t1.iso == yyyy_mm_dd_plus1 + " 00:00:00.000"
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + " 23:59:59", scale="utc")
t1 = Time(yyyy_mm_dd_plus1 + " 00:00:00", scale="utc")
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time("2007:001", scale="tai")
t2 = Time(["2007-01-02", "2007-01-03"], scale="utc")
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale="utc")
assert t3.scale == "utc"
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale="tt")
assert t3.scale == "tt"
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000.0, 50006.0)
frac = np.arange(0.0, 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format="mjd", scale="utc")
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
Time(t1, scale="local")
class TestVal2:
"""Tests related to val2"""
@pytest.mark.parametrize(
"d",
[
dict(val="2001:001", val2="ignored", scale="utc"),
dict(
val={
"year": 2015,
"month": 2,
"day": 3,
"hour": 12,
"minute": 13,
"second": 14.567,
},
val2="ignored",
scale="utc",
),
dict(val=np.datetime64("2005-02-25"), val2="ignored", scale="utc"),
dict(
val=datetime.datetime(2000, 1, 2, 12, 0, 0), val2="ignored", scale="utc"
),
],
)
def test_unused_val2_raises(self, d):
"""Test that providing val2 is for string input lets user know we won't use it"""
with pytest.raises(ValueError):
Time(**d)
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format="mjd", scale="tai")
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000.0, 50007.0)
frac = np.arange(0.0, 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format="mjd", scale="utc")
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format="mjd", scale="tai")
def test_broadcast_not_writable(self):
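        # val has shape (3, 1) and val2 shape (4,), so t's internals are created
        # by broadcasting (hence not writable, per the test name); t_b is built
        # from explicitly broadcast copies. Setting an item must work on both
        # and give identical results.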
val = (2458000 + np.arange(3))[:, None]
val2 = np.linspace(0, 1, 4, endpoint=False)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1, 2] = t_i
t[1, 2] = t_i
assert t_b[1, 2] == t[1, 2], "writing worked"
assert t_b[0, 2] == t[0, 2], "broadcasting didn't cause problems"
assert t_b[1, 1] == t[1, 1], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
def test_broadcast_one_not_writable(self):
val = 2458000 + np.arange(3)
val2 = np.arange(1)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1] = t_i
t[1] = t_i
assert t_b[1] == t[1], "writing worked"
assert t_b[0] == t[0], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
class TestSubFormat:
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = [
"2000-01-01",
"2000-01-01 01:01",
"2000-01-01 01:01:01",
"2000-01-01 01:01:01.123",
]
t = Time(times, format="iso", scale="tai")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 00:00:00.000",
"2000-01-01 01:01:00.000",
"2000-01-01 01:01:01.000",
"2000-01-01 01:01:01.123",
]
)
)
# Heterogeneous input formats with in_subfmt='date_*'
times = ["2000-01-01 01:01", "2000-01-01 01:01:01", "2000-01-01 01:01:01.123"]
t = Time(times, format="iso", scale="tai", in_subfmt="date_*")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 01:01:00.000",
"2000-01-01 01:01:01.000",
"2000-01-01 01:01:01.123",
]
)
)
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time("2000-01-01 01:01", format="iso", scale="tai", in_subfmt="date")
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time(
"2000-01-01 01:01", format="iso", scale="tai", in_subfmt="doesnt exist"
)
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = [
"2000-01-01",
"2000-01-01 01:01",
"2000-01-01 01:01:01",
"2000-01-01 01:01:01.123",
]
t = Time(times, format="iso", scale="tai", out_subfmt="date_hm")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 00:00",
"2000-01-01 01:01",
"2000-01-01 01:01",
"2000-01-01 01:01",
]
)
)
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ["2000-01-01", "2000-01-01T01:01:01", "2000-01-01T01:01:01.123"]
t = Time(times, format="fits", scale="tai")
assert np.all(
t.fits
== np.array(
[
"2000-01-01T00:00:00.000",
"2000-01-01T01:01:01.000",
"2000-01-01T01:01:01.123",
]
)
)
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format="fits", out_subfmt="long*")
assert np.all(
t2.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"+02000-01-01T01:01:01.123",
]
)
)
# Implicit long format for output, because of negative year.
times[2] = "-00594-01-01"
t3 = Time(times, format="fits", scale="tai")
assert np.all(
t3.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"-00594-01-01T00:00:00.000",
]
)
)
# Implicit long format for output, because of large positive year.
times[2] = "+10594-01-01"
t4 = Time(times, format="fits", scale="tai")
assert np.all(
t4.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"+10594-01-01T00:00:00.000",
]
)
)
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ["2000-12-01", "2001-12-01 01:01:01.123"]
t = Time(times, format="iso", scale="tai")
t.out_subfmt = "date_hm"
assert np.all(t.yday == np.array(["2000:336:00:00", "2001:335:01:01"]))
t.out_subfmt = "*"
assert np.all(
t.yday == np.array(["2000:336:00:00:00.000", "2001:335:01:01:01.123"])
)
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format="cxcsec", scale="utc")
assert t.scale == "utc"
t = Time(100.0, format="unix", scale="tai")
assert t.scale == "tai"
t = Time(100.0, format="gps", scale="utc")
assert t.scale == "utc"
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format="byear", scale="bad scale")
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time("2000:001:00:00:00", scale="bad scale")
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (
("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc"),
):
with pytest.warns(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][: inputs[0].index("(")], format="isot", scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:00.123456789(UTC)")
t = t.tai
assert t.isot == "1999-01-01T00:00:32.123"
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)")
t = t.utc
assert t.isot == "1999-01-01T00:00:00.123"
# Check scale consistency
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)", scale="tai")
assert t.scale == "tai"
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(ET)", scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError), pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)", scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format="cxcsec")
assert t.scale == "tt"
t = Time(100.0, format="unix")
assert t.scale == "utc"
t = Time(100.0, format="gps")
assert t.scale == "tai"
for date in ("2000:001", "2000-01-01T00:00:00"):
t = Time(date)
assert t.scale == "utc"
t = Time(2000.1, format="byear")
assert t.scale == "tt"
t = Time("J2000")
assert t.scale == "tt"
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format="cxcsec", scale="tai")
assert t.tt.iso == "1998-01-01 00:00:00.000"
# Create new time object from this one and change scale, format
t2 = Time(t, scale="tt", format="iso")
assert t2.value == "1998-01-01 00:00:00.000"
        # Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format="cxcsec", scale="utc")
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == "2010:001:00:00:00.000"
t = Time("2010:001:00:00:00.000", scale="utc")
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Round trip through epoch time
for scale in ("utc", "tt"):
t = Time("2000:001", scale=scale)
t2 = Time(t.unix, scale=scale, format="unix")
assert getattr(t2, scale).iso == "2000-01-01 00:00:00.000"
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time("2013-05-20 21:18:46", scale="utc")
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time("2004-09-16T23:59:59", scale="utc")
assert allclose_sec(t.unix, 1095379199.0)
def test_plot_date(self):
"""Test the plot_date format.
Depending on the situation with matplotlib, this can give different
results because the plot date epoch time changed in matplotlib 3.3. This
test tries to use the matplotlib date2num function to make the test
independent of version, but if matplotlib isn't available then the code
(and test) use the pre-3.3 epoch.
"""
try:
from matplotlib.dates import date2num
except ImportError:
# No matplotlib, in which case this uses the epoch 0000-12-31
# as per matplotlib < 3.3.
# Value from:
# matplotlib.dates.set_epoch('0000-12-31')
# val = matplotlib.dates.date2num('2000-01-01')
val = 730120.0
else:
val = date2num(datetime.datetime(2000, 1, 1))
t = Time("2000-01-01 00:00:00", scale="utc")
assert np.allclose(t.plot_date, val, atol=1e-5, rtol=0)
class TestNumericalSubFormat:
def test_explicit_example(self):
t = Time("54321.000000000001", format="mjd")
assert t == Time(54321, 1e-12, format="mjd")
assert t.mjd == 54321.0 # Lost precision!
assert t.value == 54321.0 # Lost precision!
assert t.to_value("mjd") == 54321.0 # Lost precision!
assert t.to_value("mjd", subfmt="str") == "54321.000000000001"
assert t.to_value("mjd", "bytes") == b"54321.000000000001"
expected_long = np.longdouble(54321.0) + np.longdouble(1e-12)
# Check we're the same to within the double holding jd2
# (which is less precise than longdouble on arm64).
assert np.allclose(
t.to_value("mjd", subfmt="long"),
expected_long,
rtol=0,
atol=np.finfo(float).eps,
)
t.out_subfmt = "str"
assert t.value == "54321.000000000001"
assert t.to_value("mjd") == 54321.0 # Lost precision!
assert t.mjd == "54321.000000000001"
assert t.to_value("mjd", subfmt="bytes") == b"54321.000000000001"
assert t.to_value("mjd", subfmt="float") == 54321.0 # Lost precision!
t.out_subfmt = "long"
assert np.allclose(t.value, expected_long, rtol=0.0, atol=np.finfo(float).eps)
assert np.allclose(
t.to_value("mjd", subfmt=None),
expected_long,
rtol=0.0,
atol=np.finfo(float).eps,
)
assert np.allclose(t.mjd, expected_long, rtol=0.0, atol=np.finfo(float).eps)
assert t.to_value("mjd", subfmt="str") == "54321.000000000001"
assert t.to_value("mjd", subfmt="float") == 54321.0 # Lost precision!
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
def test_explicit_longdouble(self):
i = 54321
# Create a different long double (which will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
f = max(2.0 ** (-np.finfo(np.longdouble).nmant) * 65536, np.finfo(float).eps)
mjd_long = np.longdouble(i) + np.longdouble(f)
assert mjd_long != i, "longdouble failure!"
t = Time(mjd_long, format="mjd")
expected = Time(i, f, format="mjd")
assert abs(t - expected) <= 20.0 * u.ps
t_float = Time(i + f, format="mjd")
assert t_float == Time(i, format="mjd")
assert t_float != t
assert t.value == 54321.0 # Lost precision!
assert np.allclose(
t.to_value("mjd", subfmt="long"),
mjd_long,
rtol=0.0,
atol=np.finfo(float).eps,
)
t2 = Time(mjd_long, format="mjd", out_subfmt="long")
assert np.allclose(t2.value, mjd_long, rtol=0.0, atol=np.finfo(float).eps)
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
def test_explicit_longdouble_one_val(self):
"""Ensure either val1 or val2 being longdouble is possible.
Regression test for issue gh-10033.
"""
i = 54321
f = max(2.0 ** (-np.finfo(np.longdouble).nmant) * 65536, np.finfo(float).eps)
t1 = Time(i, f, format="mjd")
t2 = Time(np.longdouble(i), f, format="mjd")
t3 = Time(i, np.longdouble(f), format="mjd")
t4 = Time(np.longdouble(i), np.longdouble(f), format="mjd")
assert t1 == t2 == t3 == t4
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
@pytest.mark.parametrize("fmt", ["mjd", "unix", "cxcsec"])
def test_longdouble_for_other_types(self, fmt):
t_fmt = getattr(Time(58000, format="mjd"), fmt) # Get regular float
t_fmt_long = np.longdouble(t_fmt)
# Create a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
atol = np.finfo(float).eps * (1.0 if fmt == "mjd" else 24.0 * 3600.0)
t_fmt_long2 = t_fmt_long + max(
t_fmt_long * np.finfo(np.longdouble).eps * 2, atol
)
assert t_fmt_long != t_fmt_long2, "longdouble weird!"
tm = Time(t_fmt_long, format=fmt)
tm2 = Time(t_fmt_long2, format=fmt)
assert tm != tm2
tm_long2 = tm2.to_value(fmt, subfmt="long")
assert np.allclose(tm_long2, t_fmt_long2, rtol=0.0, atol=atol)
def test_subformat_input(self):
s = "54321.01234567890123456789"
i, f = s.split(".") # Note, OK only for fraction < 0.5
t = Time(float(i), float("." + f), format="mjd")
t_str = Time(s, format="mjd")
t_bytes = Time(s.encode("ascii"), format="mjd")
t_decimal = Time(Decimal(s), format="mjd")
assert t_str == t
assert t_bytes == t
assert t_decimal == t
@pytest.mark.parametrize("out_subfmt", ("str", "bytes"))
def test_subformat_output(self, out_subfmt):
i = 54321
f = np.array([0.0, 1e-9, 1e-12])
t = Time(i, f, format="mjd", out_subfmt=out_subfmt)
t_value = t.value
expected = np.array(
["54321.0", "54321.000000001", "54321.000000000001"], dtype=out_subfmt
)
assert np.all(t_value == expected)
assert np.all(Time(expected, format="mjd") == t)
# Explicit sub-format.
t = Time(i, f, format="mjd")
t_mjd_subfmt = t.to_value("mjd", subfmt=out_subfmt)
assert np.all(t_mjd_subfmt == expected)
@pytest.mark.parametrize(
"fmt,string,val1,val2",
[
("jd", "2451544.5333981", 2451544.5, 0.0333981),
("decimalyear", "2000.54321", 2000.0, 0.54321),
("cxcsec", "100.0123456", 100.0123456, None),
("unix", "100.0123456", 100.0123456, None),
("gps", "100.0123456", 100.0123456, None),
("byear", "1950.1", 1950.1, None),
("jyear", "2000.1", 2000.1, None),
],
)
def test_explicit_string_other_formats(self, fmt, string, val1, val2):
t = Time(string, format=fmt)
assert t == Time(val1, val2, format=fmt)
assert t.to_value(fmt, subfmt="str") == string
def test_basic_subformat_setting(self):
t = Time("2001", format="jyear", scale="tai")
t.format = "mjd"
t.out_subfmt = "str"
assert t.value.startswith("5")
def test_basic_subformat_cache_does_not_crash(self):
t = Time("2001", format="jyear", scale="tai")
t.to_value("mjd", subfmt="str")
assert ("mjd", "str") in t.cache["format"]
t.to_value("mjd", "str")
@pytest.mark.parametrize("fmt", ["jd", "mjd", "cxcsec", "unix", "gps", "jyear"])
def test_decimal_context_does_not_affect_string(self, fmt):
t = Time("2001", format="jyear", scale="tai")
t.format = fmt
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value(fmt, "str")
t2 = Time("2001", format="jyear", scale="tai")
t2.format = fmt
with localcontext() as ctx:
ctx.prec = 40
            t2_s_40 = t2.to_value(fmt, "str")
assert (
t_s_2 == t2_s_40
), "String representation should not depend on Decimal context"
def test_decimal_context_caching(self):
t = Time(val=58000, val2=1e-14, format="mjd", scale="tai")
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value("mjd", subfmt="decimal")
t2 = Time(val=58000, val2=1e-14, format="mjd", scale="tai")
with localcontext() as ctx:
ctx.prec = 40
t_s_40 = t.to_value("mjd", subfmt="decimal")
t2_s_40 = t2.to_value("mjd", subfmt="decimal")
assert t_s_2 == t_s_40, "Should be the same but cache might make this automatic"
assert t_s_2 == t2_s_40, "Different precision should produce the same results"
@pytest.mark.parametrize(
"f, s, t",
[
("sec", "long", np.longdouble),
("sec", "decimal", Decimal),
("sec", "str", str),
],
)
def test_timedelta_basic(self, f, s, t):
dt = Time("58000", format="mjd", scale="tai") - Time(
"58001", format="mjd", scale="tai"
)
value = dt.to_value(f, s)
assert isinstance(value, t)
dt.format = f
dt.out_subfmt = s
assert isinstance(dt.value, t)
assert isinstance(dt.to_value(f, None), t)
def test_need_format_argument(self):
t = Time("J2000")
with pytest.raises(TypeError, match="missing.*required.*'format'"):
t.to_value()
with pytest.raises(ValueError, match="format must be one of"):
t.to_value("julian")
def test_wrong_in_subfmt(self):
with pytest.raises(ValueError, match="not among selected"):
Time("58000", format="mjd", in_subfmt="float")
with pytest.raises(ValueError, match="not among selected"):
Time(np.longdouble(58000), format="mjd", in_subfmt="float")
with pytest.raises(ValueError, match="not among selected"):
Time(58000.0, format="mjd", in_subfmt="str")
with pytest.raises(ValueError, match="not among selected"):
Time(58000.0, format="mjd", in_subfmt="long")
def test_wrong_subfmt(self):
t = Time(58000.0, format="mjd")
with pytest.raises(ValueError, match="must match one"):
t.to_value("mjd", subfmt="parrot")
with pytest.raises(ValueError, match="must match one"):
t.out_subfmt = "parrot"
with pytest.raises(ValueError, match="must match one"):
t.in_subfmt = "parrot"
def test_not_allowed_subfmt(self):
"""Test case where format has no defined subfmts"""
t = Time("J2000")
match = "subformat not allowed for format jyear_str"
with pytest.raises(ValueError, match=match):
t.to_value("jyear_str", subfmt="parrot")
with pytest.raises(ValueError, match=match):
t.out_subfmt = "parrot"
with pytest.raises(ValueError, match=match):
Time("J2000", out_subfmt="parrot")
with pytest.raises(ValueError, match=match):
t.in_subfmt = "parrot"
with pytest.raises(ValueError, match=match):
Time("J2000", format="jyear_str", in_subfmt="parrot")
def test_switch_to_format_with_no_out_subfmt(self):
t = Time("2001-01-01", out_subfmt="date_hm")
assert t.out_subfmt == "date_hm"
# Now do an in-place switch to format 'jyear_str' that has no subfmts
# where out_subfmt is changed to '*'.
t.format = "jyear_str"
assert t.out_subfmt == "*"
assert t.value == "J2001.001"
class TestSofaErrors:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
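        # erfa.cal2jd raises ValueError for fatal status codes (bad year or
        # month) and only warns when the day is bad but a JD is still computed.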
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with pytest.warns(ErfaWarning, match=r"bad day \(JD computed\)") as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.0])
class TestCopyReplicate:
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format="jd", scale="tai")
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format="mjd", scale="tai")
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(["2000:001"], format="yday", scale="tai", location=("45d", "45d"))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time("2000:001", format="yday", scale="tai", location=("45d", "45d"))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
        assert t.location.x == t_loc_x  # prove that it did not change
class TestStardate:
"""Sync chronometers with Starfleet Command"""
def test_iso_to_stardate(self):
assert str(Time("2320-01-01", scale="tai").stardate)[:7] == "1368.99"
assert str(Time("2330-01-01", scale="tai").stardate)[:8] == "10552.76"
assert str(Time("2340-01-01", scale="tai").stardate)[:8] == "19734.02"
@pytest.mark.parametrize(
"dates",
[
(10000, "2329-05-26 03:02"),
(20000, "2340-04-15 19:05"),
(30000, "2351-03-07 11:08"),
],
)
def test_stardate_to_iso(self, dates):
stardate, iso = dates
t_star = Time(stardate, format="stardate")
t_iso = Time(t_star, format="iso", out_subfmt="date_hm")
assert t_iso.value == iso
def test_python_builtin_copy():
t = Time("2000:001", format="yday", scale="tai")
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == "datetime"
assert t.scale == "utc"
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time("2001:001", format="yday")
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format="decimalyear")
assert np.all(t.value == [2000.5, 2000.75])
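    # The fractional part is a fraction of that particular year's length, so
    # compare against JDs interpolated between 2000:001 and 2001:001.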
jd0 = Time("2000:001").jd
jd1 = Time("2001:001").jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd, jd0 + 0.75 * d_jd])
def test_fits_year0():
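    # JD 1721425.5 is 0001-01-01; dates before year 1 (or after 9999) switch
    # the FITS string to the signed five-digit "long" year form.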
t = Time(1721425.5, format="jd", scale="tai")
assert t.fits == "0001-01-01T00:00:00.000"
t = Time(1721425.5 - 366.0, format="jd", scale="tai")
assert t.fits == "+00000-01-01T00:00:00.000"
t = Time(1721425.5 - 366.0 - 365.0, format="jd", scale="tai")
assert t.fits == "-00001-01-01T00:00:00.000"
def test_fits_year10000():
t = Time(5373484.5, format="jd", scale="tai")
assert t.fits == "+10000-01-01T00:00:00.000"
t = Time(5373484.5 - 365.0, format="jd", scale="tai")
assert t.fits == "9999-01-01T00:00:00.000"
t = Time(5373484.5, -1.0 / 24.0 / 3600.0, format="jd", scale="tai")
assert t.fits == "9999-12-31T23:59:59.000"
def test_dir():
t = Time("2000:001", format="yday", scale="tai")
assert "utc" in dir(t)
def test_time_from_epoch_jds():
"""Test that jd1/jd2 in a TimeFromEpoch format is always well-formed:
jd1 is an integral value and abs(jd2) <= 0.5.
"""
    # From 1998:001 00:00 to 1998:002 12:00 by a non-round step. This will
# catch jd2 == 0 and a case of abs(jd2) == 0.5.
cxcsecs = np.linspace(0, 86400 * 1.5, 49)
for cxcsec in cxcsecs:
t = Time(cxcsec, format="cxcsec")
assert np.round(t.jd1) == t.jd1
assert np.abs(t.jd2) <= 0.5
t = Time(cxcsecs, format="cxcsec")
assert np.all(np.round(t.jd1) == t.jd1)
assert np.all(np.abs(t.jd2) <= 0.5)
assert np.any(np.abs(t.jd2) == 0.5) # At least one exactly 0.5
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format="mjd", scale="utc")
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format="mjd", scale="utc")
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format="mjd", scale="utc")
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert "Time" in str(err.value)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time("1900-01-01", scale="ut1")
t.delta_ut1_utc = 0.0
with pytest.warns(ErfaWarning):
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion(monkeypatch):
# Check that if we have internet, and downloading is allowed, we
# can get conversion to UT1 for the present, since we will download
# IERS_A in IERS_Auto.
monkeypatch.setattr("astropy.utils.iers.conf.auto_download", True)
Time(Time.now().cxcsec, format="cxcsec", scale="ut1")
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype(">f8")
little_endian = mjd.astype("<f8")
time_mjd = Time(mjd, format="mjd")
time_big = Time(big_endian, format="mjd")
time_little = Time(little_endian, format="mjd")
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = "longyear"
subfmts = (
(
"date",
r"(?P<year>[+-]\d{5})-%m-%d", # hybrid
"{year:+06d}-{mon:02d}-{day:02d}",
),
)
t = Time("+02000-02-03", format="longyear")
assert t.value == "+02000-02-03"
assert t.jd == Time("2000-02-03").jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (
("jd", 2451577.5),
("mjd", 51577.0),
("cxcsec", 65923264.184), # confirmed with Chandra.Time
("datetime", datetime.datetime(2000, 2, 3, 0, 0)),
("iso", "2000-02-03 00:00:00.000"),
):
t = Time("+02000-02-03", format="fits")
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_unix_tai_format():
t = Time("2020-01-01", scale="utc")
assert allclose_sec(t.unix_tai - t.unix, 37.0)
t = Time("1970-01-01", scale="utc")
assert allclose_sec(t.unix_tai - t.unix, 8 + 8.2e-05)
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time("+02000-02-03", format="fits", out_subfmt="date_hms", precision=5)
tc = t.copy()
t.format = "isot"
assert t.precision == 5
assert t.out_subfmt == "date_hms"
assert t.value == "2000-02-03T00:00:00.00000"
t.format = "fits"
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time("+02000-02-03", format="fits", out_subfmt="longdate")
t.format = "isot"
assert t.out_subfmt == "*" # longdate_hms not there, goes to default
assert t.value == "2000-02-03T00:00:00.000"
t.format = "fits"
assert t.out_subfmt == "*"
assert t.value == "2000-02-03T00:00:00.000" # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time("2007:001", scale="tai")
with pytest.raises(ValueError) as err:
t1.replicate(format="definitely_not_a_valid_format")
assert "format must be one of" in str(err.value)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time("2007:001", scale="tai")
assert "astropy_time" not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format="astropy_time")
assert "format must be one of" in str(err.value)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(
["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"],
format="iso",
scale="utc",
)
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10 * u.hour, tzname="US/Hawaii")
    # The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time("2010-09-03 00:00:00")
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1990-09-03 06:00:00"])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError, match=r"does not support leap seconds"):
Time("2015-06-30 23:59:60.000").to_datetime()
@pytest.mark.skipif(not HAS_PYTZ, reason="requires pytz")
def test_to_datetime_pytz():
import pytz
tz = pytz.timezone("US/Hawaii")
time = Time("2010-09-03 00:00:00")
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1990-09-03 06:00:00"])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time("2010-09-03 00:00:00")
t2 = Time("2010-09-03 00:00:00")
# Time starts out without a cache
assert "cache" not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache["format"]["iso"] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache["scale"]["tai"] == t2.tai
# New Time object after scale transform does not have a cache yet
assert "cache" not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert "cache" not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert "cache" in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
    Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [
[[f"{y:04d}-{m:02d}-{d:02d}" for d in range(1, 3)] for m in range(5, 7)]
for y in range(2012, 2014)
]
cutf32 = Column(times)
cbytes = cutf32.astype("S")
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(["B1950"]))
tbytes = Time(Column([b"B1950"]))
assert tutf32 == tbytes
# Regression tests for arrays with entries with unequal length. gh-6903.
times = Column([b"2012-01-01", b"2012-01-01T00:00:00"])
assert np.all(Time(times) == Time(["2012-01-01", "2012-01-01T00:00:00"]))
def test_bytes_input():
tstring = "2011-01-02T03:04:05"
tbytes = b"2011-01-02T03:04:05"
assert tbytes.decode("ascii") == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == "S"
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format="cxcsec")
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert "Time object is read-only. Make a copy()" in str(err.value)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert "Time object is read-only. Make a copy()" in str(err.value)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is writeable because it gets boxed into a zero-d array
t = Time("2000:001", scale="utc")
t[()] = "2000:002"
assert t.value.startswith("2000:002")
# Transformed attribute is not writeable
t = Time(["2000:001", "2000:002"], scale="utc")
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = "2005:001"
assert "Time object is read-only. Make a copy()" in str(err.value)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format="cxcsec", location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format="cxcsec")
assert (
"cannot set to Time with different location: "
"expected location={} and "
"got location=None".format(loc[0]) in str(err.value)
)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format="cxcsec", location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format="cxcsec", location=loc[1])
assert (
"cannot set to Time with different location: "
"expected location={} and "
"got location={}".format(loc[0], loc[1]) in str(err.value)
)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format="cxcsec")
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format="cxcsec", location=loc[1])
assert (
"cannot set to Time with different location: "
"expected location=None and "
"got location={}".format(loc[1]) in str(err.value)
)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format="cxcsec", location=loc)
t[0, :] = Time([-3, -4], format="cxcsec", location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format="cxcsec")
assert t.cache == {}
t.iso
assert "iso" in t.cache["format"]
assert np.all(
t.iso
== [
["1998-01-01 00:00:01.000", "1998-01-01 00:00:02.000"],
["1998-01-01 00:00:03.000", "1998-01-01 00:00:04.000"],
]
)
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100], [3, 4]])
assert np.all(
t.iso
== [
["1998-01-01 00:00:01.000", "1998-01-01 00:01:40.000"],
["1998-01-01 00:00:03.000", "1998-01-01 00:00:04.000"],
]
)
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100], [200, 200]])
# Array of strings in yday format
t[:, 1] = ["1998:002", "1998:003"]
assert allclose_sec(t.value, [[1, 86400 * 1], [200, 86400 * 2]])
# Incompatible numeric value
t = Time(["2000:001", "2000:002"])
t[0] = "2001:001"
with pytest.raises(ValueError) as err:
t[0] = 100
assert "cannot convert value to a compatible Time object" in str(err.value)
def test_setitem_from_time_objects():
"""Set from existing Time object."""
# Set from time object with different scale
t = Time(["2000:001", "2000:002"], scale="utc")
t2 = Time(["2000:010"], scale="tai")
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(["2000:001", "2000:002"], scale="utc")
t2.format = "jyear"
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format="cxcsec")
with pytest.raises(IndexError):
t["asdf"] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format="cxcsec")
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, "_delta_tdb_tt")
assert not hasattr(t, "_delta_ut1_utc")
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-#7449 and PR gh-#7453.
class _Time(Time):
pass
t1 = Time("1999-01-01T01:01:01")
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime"""
time_string = "2010-09-03 06:00:00"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
def test_strftime_array():
tstrings = ["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1995-12-31 23:59:60"]
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S").tolist() == tstrings
def test_strftime_array_2():
tstrings = [
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1995-12-31 23:59:60"],
]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime("%Y-%m-%d %H:%M:%S") == tstrings)
assert t.strftime("%Y-%m-%d %H:%M:%S").shape == tstrings.shape
def test_strftime_leapsecond():
time_string = "1995-12-31 23:59:60"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
def test_strptime_scalar():
"""Test of Time.strptime"""
time_string = "2007-May-04 21:08:12"
time_object = Time("2007-05-04 21:08:12")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S")
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime"""
tstrings = [
["1998-Jan-01 00:00:01", "1998-Jan-01 00:00:02"],
["1998-Jan-01 00:00:03", "1998-Jan-01 00:00:04"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1998-01-01 00:00:04"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, "%S")
def test_strptime_input_bytes_scalar():
time_string = b"2007-May-04 21:08:12"
time_object = Time("2007-05-04 21:08:12")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S")
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [
[b"1998-Jan-01 00:00:01", b"1998-Jan-01 00:00:02"],
[b"1998-Jan-01 00:00:03", b"1998-Jan-01 00:00:04"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1998-01-01 00:00:04"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time("1995-12-31T23:59:60", format="isot")
time_obj2 = Time.strptime("1995-Dec-31 23:59:60", "%Y-%b-%d %H:%M:%S")
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time("0995-12-31T00:00:00", format="isot", scale="tai")
time_obj2 = Time.strptime("0995-Dec-31 00:00:00", "%Y-%b-%d %H:%M:%S", scale="tai")
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = "2007-May-04 21:08:12.123"
time_object = Time("2007-05-04 21:08:12.123")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S.%f")
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime"""
tstrings = [
["1998-Jan-01 00:00:01.123", "1998-Jan-01 00:00:02.000001"],
["1998-Jan-01 00:00:03.000900", "1998-Jan-01 00:00:04.123456"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01.123", "1998-01-01 00:00:02.000001"],
["1998-01-01 00:00:03.000900", "1998-01-01 00:00:04.123456"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S.%f")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime"""
time_string = "2010-09-03 06:00:00.123"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == time_string
def test_strftime_scalar_fracsec_precision():
time_string = "2010-09-03 06:00:00.123123123"
t = Time(time_string)
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == "2010-09-03 06:00:00.123"
t.precision = 9
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == "2010-09-03 06:00:00.123123123"
def test_strftime_array_fracsec():
tstrings = [
"2010-09-03 00:00:00.123000",
"2005-09-03 06:00:00.000001",
"1995-12-31 23:59:60.000900",
]
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S.%f").tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format="unix")
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, "1970-01-01 00:01:00")
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time("1970-01-01 00:01:00"))
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time("1970-01-01 00:01:00")])
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format="unix"))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format="unix"))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format="unix"))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format="unix"))
def test_insert_time_out_subfmt():
# Check insert() with out_subfmt set
T = Time(["1999-01-01", "1999-01-02"], out_subfmt="date")
T = T.insert(0, T[0])
assert T.out_subfmt == "date"
assert T[0] == T[1]
T = T.insert(1, "1999-01-03")
assert T.out_subfmt == "date"
assert str(T[1]) == "1999-01-03"
def test_insert_exceptions():
tm = Time(1, format="unix")
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert "cannot insert into scalar" in str(err.value)
tm = Time([1, 2], format="unix")
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert "axis must be 0" in str(err.value)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert "obj arg must be an integer" in str(err.value)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert "index -100 is out of bounds for axis 0 with size 2" in str(err.value)
def test_datetime64_no_format():
dt64 = np.datetime64("2000-01-02T03:04:05.123456789")
t = Time(dt64, scale="utc", precision=9)
assert t.iso == "2000-01-02 03:04:05.123456789"
assert t.datetime64 == dt64
assert t.value == dt64
def test_hash_time():
loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
for loc in None, loc1:
t = Time([1, 1, 2, 3], format="cxcsec", location=loc)
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"
t = Time(1, format="cxcsec", location=loc)
t2 = Time(1, format="cxcsec")
assert hash(t) != hash(t2)
t = Time("2000:180", scale="utc")
t2 = Time(t, scale="tai")
assert t == t2
assert hash(t) != hash(t2)
def test_hash_time_delta():
t = TimeDelta([1, 1, 2, 3], format="sec")
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
def test_get_time_fmt_exception_messages():
with pytest.raises(ValueError) as err:
Time(10)
assert "No time format was given, and the input is" in str(err.value)
with pytest.raises(ValueError) as err:
Time("2000:001", format="not-a-format")
assert "Format 'not-a-format' is not one of the allowed" in str(err.value)
with pytest.raises(ValueError) as err:
Time("200")
assert "Input values did not match any of the formats where" in str(err.value)
with pytest.raises(ValueError) as err:
Time("200", format="iso")
assert (
"Input values did not match the format class iso:"
+ os.linesep
+ "ValueError: Time 200 does not match iso format"
) == str(err.value)
with pytest.raises(ValueError) as err:
Time(200, format="iso")
assert (
"Input values did not match the format class iso:"
+ os.linesep
+ "TypeError: Input values for iso class must be strings"
) == str(err.value)
def test_ymdhms_defaults():
t1 = Time({"year": 2001}, format="ymdhms")
assert t1 == Time("2001-01-01")
times_dict_ns = {
"year": [2001, 2002],
"month": [2, 3],
"day": [4, 5],
"hour": [6, 7],
"minute": [8, 9],
"second": [10, 11],
}
table_ns = Table(times_dict_ns)
struct_array_ns = table_ns.as_array()
rec_array_ns = struct_array_ns.view(np.recarray)
ymdhms_names = ("year", "month", "day", "hour", "minute", "second")
@pytest.mark.parametrize("tm_input", [table_ns, struct_array_ns, rec_array_ns])
@pytest.mark.parametrize("kwargs", [{}, {"format": "ymdhms"}])
@pytest.mark.parametrize("as_row", [False, True])
def test_ymdhms_init_from_table_like(tm_input, kwargs, as_row):
time_ns = Time(["2001-02-04 06:08:10", "2002-03-05 07:09:11"])
if as_row:
tm_input = tm_input[0]
time_ns = time_ns[0]
tm = Time(tm_input, **kwargs)
assert np.all(tm == time_ns)
assert tm.value.dtype.names == ymdhms_names
def test_ymdhms_init_from_dict_array():
times_dict_shape = {"year": [[2001, 2002], [2003, 2004]], "month": [2, 3], "day": 4}
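    # The (2,)-shaped 'month' and the scalar 'day' broadcast against the
    # (2, 2)-shaped 'year' array.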
time_shape = Time([["2001-02-04", "2002-03-04"], ["2003-02-04", "2004-03-04"]])
time = Time(times_dict_shape, format="ymdhms")
assert np.all(time == time_shape)
assert time.ymdhms.shape == time_shape.shape
@pytest.mark.parametrize("kwargs", [{}, {"format": "ymdhms"}])
def test_ymdhms_init_from_dict_scalar(kwargs):
"""
Test YMDHMS functionality for a dict input. This includes ensuring that
key and attribute access work. For extra fun use a time within a leap
second.
"""
time_dict = {
"year": 2016,
"month": 12,
"day": 31,
"hour": 23,
"minute": 59,
"second": 60.123456789,
}
tm = Time(time_dict, **kwargs)
assert tm == Time("2016-12-31T23:59:60.123456789")
for attr in time_dict:
for value in (tm.value[attr], getattr(tm.value, attr)):
if attr == "second":
assert allclose_sec(time_dict[attr], value)
else:
assert time_dict[attr] == value
# Now test initializing from a YMDHMS format time using the object
tm_rt = Time(tm)
assert tm_rt == tm
assert tm_rt.format == "ymdhms"
# Test initializing from a YMDHMS value (np.void, i.e. recarray row)
# without specified format.
tm_rt = Time(tm.ymdhms)
assert tm_rt == tm
assert tm_rt.format == "ymdhms"
def test_ymdhms_exceptions():
with pytest.raises(ValueError, match="input must be dict or table-like"):
Time(10, format="ymdhms")
match = "'wrong' not allowed as YMDHMS key name(s)"
    # NB: pytest.raises(..., match=...) treats the pattern as a regex, and the
    # literal "(s)" in the message contains regex metacharacters, so we fall
    # back to the old school ``match in str(err.value)`` check.
with pytest.raises(ValueError) as err:
Time({"year": 2019, "wrong": 1}, format="ymdhms")
assert match in str(err.value)
match = "for 2 input key names you must supply 'year', 'month'"
with pytest.raises(ValueError, match=match):
Time({"year": 2019, "minute": 1}, format="ymdhms")
def test_ymdhms_masked():
tm = Time({"year": [2000, 2001]}, format="ymdhms")
tm[0] = np.ma.masked
assert isinstance(tm.value[0], np.ma.core.mvoid)
for name in ymdhms_names:
assert tm.value[0][name] is np.ma.masked
# Converted from doctest in astropy/test/formats.py for debugging
def test_ymdhms_output():
t = Time(
{
"year": 2015,
"month": 2,
"day": 3,
"hour": 12,
"minute": 13,
"second": 14.567,
},
scale="utc",
)
# NOTE: actually comes back as np.void for some reason
# NOTE: not necessarily a python int; might be an int32
assert t.ymdhms.year == 2015
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_ecsv(fmt):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t["a"].format == t2["a"].format
# Some loss of precision in the serialization
assert not np.all(t["a"] == t2["a"])
# But no loss in the format representation
assert np.all(t["a"].value == t2["a"].value)
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_fits(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = tmp_path / "out.fits"
t.write(out, format="fits")
t2 = Table.read(out, format="fits", astropy_native=True)
# Currently the format is lost in FITS so set it back
t2["a"].format = fmt
# No loss of precision in the serialization or representation
assert np.all(t["a"] == t2["a"])
assert np.all(t["a"].value == t2["a"].value)
@pytest.mark.skipif(not HAS_H5PY, reason="Needs h5py")
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_hdf5(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = tmp_path / "out.h5"
t.write(str(out), format="hdf5", path="root", serialize_meta=True)
t2 = Table.read(str(out), format="hdf5", path="root")
assert t["a"].format == t2["a"].format
# No loss of precision in the serialization or representation
assert np.all(t["a"] == t2["a"])
assert np.all(t["a"].value == t2["a"].value)
# There are two stages of validation now - one on input into a format, so that
# the format conversion code has tidy matched arrays to work with, and the
# other when object construction does not go through a format object. Or at
# least, the format object is constructed with "from_jd=True". In this case the
# normal input validation does not happen but the new input validation does,
# and can ensure that strange broadcasting anomalies can't happen.
# This form of construction uses from_jd=True.
def test_broadcasting_writeable():
t = Time("J2015") + np.linspace(-1, 1, 10) * u.day
t[2] = Time(58000, format="mjd")
def test_format_subformat_compatibility():
"""Test that changing format with out_subfmt defined is not a problem.
See #9812, #9810."""
t = Time("2019-12-20", out_subfmt="date_??")
assert t.mjd == 58837.0
assert t.yday == "2019:354:00:00" # Preserves out_subfmt
t2 = t.replicate(format="mjd")
assert t2.out_subfmt == "*" # Changes to default
t2 = t.copy(format="mjd")
assert t2.out_subfmt == "*"
t2 = Time(t, format="mjd")
assert t2.out_subfmt == "*"
t2 = t.copy(format="yday")
assert t2.out_subfmt == "date_??"
assert t2.value == "2019:354:00:00"
t.format = "yday"
assert t.value == "2019:354:00:00"
assert t.out_subfmt == "date_??"
t = Time("2019-12-20", out_subfmt="date")
assert t.mjd == 58837.0
assert t.yday == "2019:354"
@pytest.mark.parametrize("use_fast_parser", ["force", "False"])
def test_format_fractional_string_parsing(use_fast_parser):
"""Test that string like "2022-08-01.123" does not parse as ISO.
See #6476 and the fix."""
with pytest.raises(
ValueError, match=r"Input values did not match the format class iso"
):
with conf.set_temp("use_fast_parser", use_fast_parser):
Time("2022-08-01.123", format="iso")
@pytest.mark.parametrize("fmt_name,fmt_class", TIME_FORMATS.items())
def test_to_value_with_subfmt_for_every_format(fmt_name, fmt_class):
"""From a starting Time value, test that every valid combination of
to_value(format, subfmt) works. See #9812, #9361.
"""
t = Time("2000-01-01")
subfmts = list(subfmt[0] for subfmt in fmt_class.subfmts) + [None, "*"]
for subfmt in subfmts:
t.to_value(fmt_name, subfmt)
@pytest.mark.parametrize("location", [None, (45, 45)])
def test_location_init(location):
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or a list of
Time instances.
"""
tm = Time("J2010", location=location)
# Init from a scalar Time
tm2 = Time(tm)
assert np.all(tm.location == tm2.location)
assert type(tm.location) is type(tm2.location)
# From a list of Times
tm2 = Time([tm, tm])
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location)
# Effectively the same as a list of Times, but just to be sure that
# Table mixin initialization is working as expected.
tm2 = Table([[tm, tm]])["col0"]
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location)
def test_location_init_fail():
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or a list of
Time instances. Make sure exception is correct.
"""
tm = Time("J2010", location=(45, 45))
tm2 = Time("J2010")
with pytest.raises(
ValueError, match="cannot concatenate times unless all locations"
):
Time([tm, tm2])
def test_linspace():
"""Test `np.linspace` `__array_func__` implementation for scalar and arrays."""
t1 = Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"])
t2 = Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"])
atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()
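    # Tolerance: a couple of double-precision ULPs at the scale of the interval.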
ts = np.linspace(t1[0], t2[0], 3)
assert ts[0].isclose(Time("2021-01-01 00:00:00"), atol=atol)
assert ts[1].isclose(Time("2021-01-01 00:30:00"), atol=atol)
assert ts[2].isclose(Time("2021-01-01 01:00:00"), atol=atol)
ts = np.linspace(t1, t2[0], 2, endpoint=False)
assert ts.shape == (2, 2)
assert all(
ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2021-01-01 00:30:00", "2021-01-01 12:30:00"]), atol=atol)
)
ts = np.linspace(t1, t2, 7)
assert ts.shape == (7, 2)
assert all(
ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2021-01-01 00:10:00", "2021-03-03 00:00:00"]), atol=atol)
)
assert all(
ts[5].isclose(Time(["2021-01-01 00:50:00", "2021-10-29 00:00:00"]), atol=atol)
)
assert all(
ts[6].isclose(Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"]), atol=atol)
)
def test_linspace_steps():
"""Test `np.linspace` `retstep` option."""
t1 = Time(["2021-01-01 00:00:00", "2021-01-01 12:00:00"])
t2 = Time("2021-01-02 00:00:00")
atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()
ts, st = np.linspace(t1, t2, 7, retstep=True)
assert ts.shape == (7, 2)
assert st.shape == (2,)
assert all(ts[1].isclose(ts[0] + st, atol=atol))
assert all(ts[6].isclose(ts[0] + 6 * st, atol=atol))
assert all(st.isclose(TimeDelta([14400, 7200], format="sec"), atol=atol))
def test_linspace_fmts():
"""Test `np.linspace` `__array_func__` implementation for start/endpoints
from different formats/systems.
"""
t1 = Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"])
t2 = Time(2458850, format="jd")
t3 = Time(1578009600, format="unix")
atol = 2 * np.finfo(float).eps * abs(t1 - Time([t2, t3])).max()
ts = np.linspace(t1, t2, 3)
assert ts.shape == (3, 2)
assert all(
ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-01 18:00:00"]), atol=atol)
)
assert all(
ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-01 12:00:00"]), atol=atol)
)
ts = np.linspace(t1, Time([t2, t3]), 3)
assert ts.shape == (3, 2)
assert all(
ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-02 12:00:00"]), atol=atol)
)
assert all(
ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-03 00:00:00"]), atol=atol)
)
def test_to_string():
dims = [8, 2, 8]
dx = np.arange(np.prod(dims)).reshape(dims)
tm = Time("2020-01-01", out_subfmt="date") + dx * u.day
exp_lines = [
"[[['2020-01-01' '2020-01-02' ... '2020-01-07' '2020-01-08']",
" ['2020-01-09' '2020-01-10' ... '2020-01-15' '2020-01-16']]",
"",
" [['2020-01-17' '2020-01-18' ... '2020-01-23' '2020-01-24']",
" ['2020-01-25' '2020-01-26' ... '2020-01-31' '2020-02-01']]",
"",
" ...",
"",
" [['2020-04-06' '2020-04-07' ... '2020-04-12' '2020-04-13']",
" ['2020-04-14' '2020-04-15' ... '2020-04-20' '2020-04-21']]",
"",
" [['2020-04-22' '2020-04-23' ... '2020-04-28' '2020-04-29']",
" ['2020-04-30' '2020-05-01' ... '2020-05-06' '2020-05-07']]]",
]
exp_str = "\n".join(exp_lines)
with np.printoptions(threshold=100, edgeitems=2, linewidth=75):
out_str = str(tm)
out_repr = repr(tm)
assert out_str == exp_str
exp_repr = f"<Time object: scale='utc' format='iso' value={exp_str}>"
assert out_repr == exp_repr
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import numpy as np
import pytest
from astropy import units as u
from astropy.table import Column
from astropy.time import Time, TimeDelta
allclose_sec = functools.partial(
np.allclose, rtol=2.0**-52, atol=2.0**-52 * 24 * 3600
) # 20 ps atol
class TestTimeQuantity:
"""Test Interaction of Time with Quantities"""
def test_valid_quantity_input(self):
"""Test Time formats that are allowed to take quantity input."""
q = 2450000.125 * u.day
t1 = Time(q, format="jd", scale="utc")
assert t1.value == q.value
q2 = q.to(u.second)
t2 = Time(q2, format="jd", scale="utc")
assert t2.value == q.value == q2.to_value(u.day)
q3 = q - 2400000.5 * u.day
t3 = Time(q3, format="mjd", scale="utc")
assert t3.value == q3.value
# test we can deal with two quantity arguments, with different units
qs = 24.0 * 36.0 * u.second
t4 = Time(q3, qs, format="mjd", scale="utc")
assert t4.value == (q3 + qs).to_value(u.day)
qy = 1990.0 * u.yr
ty1 = Time(qy, format="jyear", scale="utc")
assert ty1.value == qy.value
ty2 = Time(qy.to(u.day), format="jyear", scale="utc")
assert ty2.value == qy.value
qy2 = 10.0 * u.yr
tcxc = Time(qy2, format="cxcsec")
assert tcxc.value == qy2.to_value(u.second)
tgps = Time(qy2, format="gps")
assert tgps.value == qy2.to_value(u.second)
tunix = Time(qy2, format="unix")
assert tunix.value == qy2.to_value(u.second)
qd = 2000.0 * 365.0 * u.day
tplt = Time(qd, format="plot_date", scale="utc")
assert tplt.value == qd.value
def test_invalid_quantity_input(self):
with pytest.raises(u.UnitsError):
Time(2450000.0 * u.m, format="jd", scale="utc")
with pytest.raises(u.UnitsError):
Time(2450000.0 * u.dimensionless_unscaled, format="jd", scale="utc")
def test_column_with_and_without_units(self):
"""Ensure a Column without a unit is treated as an array [#3648]"""
a = np.arange(50000.0, 50010.0)
ta = Time(a, format="mjd")
c1 = Column(np.arange(50000.0, 50010.0), name="mjd")
tc1 = Time(c1, format="mjd")
assert np.all(ta == tc1)
c2 = Column(np.arange(50000.0, 50010.0), name="mjd", unit="day")
tc2 = Time(c2, format="mjd")
assert np.all(ta == tc2)
c3 = Column(np.arange(50000.0, 50010.0), name="mjd", unit="m")
with pytest.raises(u.UnitsError):
Time(c3, format="mjd")
def test_no_quantity_input_allowed(self):
"""Time formats that are not allowed to take Quantity input."""
qy = 1990.0 * u.yr
for fmt in ("iso", "yday", "datetime", "byear", "byear_str", "jyear_str"):
with pytest.raises(ValueError):
Time(qy, format=fmt, scale="utc")
def test_valid_quantity_operations(self):
"""Check that adding a time-valued quantity to a Time gives a Time"""
t0 = Time(100000.0, format="cxcsec")
q1 = 10.0 * u.second
t1 = t0 + q1
assert isinstance(t1, Time)
assert t1.value == t0.value + q1.to_value(u.second)
q2 = 1.0 * u.day
t2 = t0 - q2
assert allclose_sec(t2.value, t0.value - q2.to_value(u.second))
# check broadcasting
q3 = np.arange(15.0).reshape(3, 5) * u.hour
t3 = t0 - q3
assert t3.shape == q3.shape
assert allclose_sec(t3.value, t0.value - q3.to_value(u.second))
def test_invalid_quantity_operations(self):
"""Check that comparisons of Time with quantities does not work
(even for time-like, since we cannot compare Time to TimeDelta)"""
with pytest.raises(TypeError):
Time(100000.0, format="cxcsec") > 10.0 * u.m
with pytest.raises(TypeError):
Time(100000.0, format="cxcsec") > 10.0 * u.second
class TestTimeDeltaQuantity:
"""Test interaction of TimeDelta with Quantities"""
def test_valid_quantity_input(self):
"""Test that TimeDelta can take quantity input."""
q = 500.25 * u.day
dt1 = TimeDelta(q, format="jd")
assert dt1.value == q.value
dt2 = TimeDelta(q, format="sec")
assert dt2.value == q.to_value(u.second)
dt3 = TimeDelta(q)
assert dt3.value == q.value
def test_invalid_quantity_input(self):
with pytest.raises(u.UnitsError):
TimeDelta(2450000.0 * u.m, format="jd")
with pytest.raises(u.UnitsError):
Time(2450000.0 * u.dimensionless_unscaled, format="jd", scale="utc")
with pytest.raises(TypeError):
TimeDelta(100, format="sec") > 10.0 * u.m
def test_quantity_output(self):
q = 500.25 * u.day
dt = TimeDelta(q)
assert dt.to(u.day) == q
assert dt.to_value(u.day) == q.value
assert dt.to_value("day") == q.value
assert dt.to(u.second).value == q.to_value(u.second)
assert dt.to_value(u.second) == q.to_value(u.second)
assert dt.to_value("s") == q.to_value(u.second)
# Following goes through "format", but should be the same.
assert dt.to_value("sec") == q.to_value(u.second)
def test_quantity_output_errors(self):
dt = TimeDelta(250.0, format="sec")
with pytest.raises(u.UnitsError):
dt.to(u.m)
with pytest.raises(u.UnitsError):
dt.to_value(u.m)
with pytest.raises(u.UnitsError):
dt.to_value(unit=u.m)
with pytest.raises(
ValueError,
match="not one of the known formats.*failed to parse as a unit",
):
dt.to_value("parrot")
with pytest.raises(TypeError):
dt.to_value("sec", unit=u.s)
with pytest.raises(TypeError):
# TODO: would be nice to make this work!
dt.to_value(u.s, subfmt="str")
def test_valid_quantity_operations1(self):
"""Check adding/subtracting/comparing a time-valued quantity works
with a TimeDelta. Addition/subtraction should give TimeDelta"""
t0 = TimeDelta(106400.0, format="sec")
q1 = 10.0 * u.second
t1 = t0 + q1
assert isinstance(t1, TimeDelta)
assert t1.value == t0.value + q1.to_value(u.second)
q2 = 1.0 * u.day
t2 = t0 - q2
assert isinstance(t2, TimeDelta)
assert allclose_sec(t2.value, t0.value - q2.to_value(u.second))
# now comparisons
assert t0 > q1
assert t0 < 1.0 * u.yr
# and broadcasting
q3 = np.arange(12.0).reshape(4, 3) * u.hour
t3 = t0 + q3
assert isinstance(t3, TimeDelta)
assert t3.shape == q3.shape
assert allclose_sec(t3.value, t0.value + q3.to_value(u.second))
def test_valid_quantity_operations2(self):
"""Check that TimeDelta is treated as a quantity where possible."""
t0 = TimeDelta(100000.0, format="sec")
f = 1.0 / t0
assert isinstance(f, u.Quantity)
assert f.unit == 1.0 / u.day
g = 10.0 * u.m / u.second**2
v = t0 * g
assert isinstance(v, u.Quantity)
assert u.allclose(v, t0.sec * g.value * u.m / u.second)
q = np.log10(t0 / u.second)
assert isinstance(q, u.Quantity)
assert q.value == np.log10(t0.sec)
s = 1.0 * u.m
v = s / t0
assert isinstance(v, u.Quantity)
assert u.allclose(v, 1.0 / t0.sec * u.m / u.s)
t = 1.0 * u.s
t2 = t0 * t
assert isinstance(t2, u.Quantity)
assert u.allclose(t2, t0.sec * u.s**2)
t3 = [1] / t0
assert isinstance(t3, u.Quantity)
assert u.allclose(t3, 1 / (t0.sec * u.s))
# broadcasting
t1 = TimeDelta(np.arange(100000.0, 100012.0).reshape(6, 2), format="sec")
f = np.array([1.0, 2.0]) * u.cycle * u.Hz
phase = f * t1
assert isinstance(phase, u.Quantity)
assert phase.shape == t1.shape
assert u.allclose(phase, t1.sec * f.value * u.cycle)
q = t0 * t1
assert isinstance(q, u.Quantity)
assert np.all(q == t0.to(u.day) * t1.to(u.day))
q = t1 / t0
assert isinstance(q, u.Quantity)
assert np.all(q == t1.to(u.day) / t0.to(u.day))
def test_valid_quantity_operations3(self):
"""Test a TimeDelta remains one if possible."""
t0 = TimeDelta(10.0, format="jd")
q = 10.0 * u.one
t1 = q * t0
assert isinstance(t1, TimeDelta)
assert t1 == TimeDelta(100.0, format="jd")
t2 = t0 * q
assert isinstance(t2, TimeDelta)
assert t2 == TimeDelta(100.0, format="jd")
t3 = t0 / q
assert isinstance(t3, TimeDelta)
assert t3 == TimeDelta(1.0, format="jd")
q2 = 1.0 * u.percent
t4 = t0 * q2
assert isinstance(t4, TimeDelta)
assert abs(t4 - TimeDelta(0.1, format="jd")) < 1.0 * u.ns
q3 = 1.0 * u.hr / (36.0 * u.s)
t5 = q3 * t0
        assert isinstance(t5, TimeDelta)
assert abs(t5 - TimeDelta(1000.0, format="jd")) < 1.0 * u.ns
# Test multiplication with a unit.
t6 = t0 * u.one
assert isinstance(t6, TimeDelta)
assert t6 == TimeDelta(10.0, format="jd")
t7 = u.one * t0
assert isinstance(t7, TimeDelta)
assert t7 == TimeDelta(10.0, format="jd")
t8 = t0 * ""
assert isinstance(t8, TimeDelta)
assert t8 == TimeDelta(10.0, format="jd")
t9 = "" * t0
assert isinstance(t9, TimeDelta)
assert t9 == TimeDelta(10.0, format="jd")
t10 = t0 / u.one
assert isinstance(t10, TimeDelta)
        assert t10 == TimeDelta(10.0, format="jd")
t11 = t0 / ""
assert isinstance(t11, TimeDelta)
assert t11 == TimeDelta(10.0, format="jd")
t12 = t0 / [1]
assert isinstance(t12, TimeDelta)
assert t12 == TimeDelta(10.0, format="jd")
t13 = [1] * t0
assert isinstance(t13, TimeDelta)
assert t13 == TimeDelta(10.0, format="jd")
def test_invalid_quantity_operations(self):
"""Check comparisons of TimeDelta with non-time quantities fails."""
with pytest.raises(TypeError):
TimeDelta(100000.0, format="sec") > 10.0 * u.m
def test_invalid_quantity_operations2(self):
"""Check that operations with non-time/quantity fail."""
td = TimeDelta(100000.0, format="sec")
with pytest.raises(TypeError):
td * object()
with pytest.raises(TypeError):
td / object()
def test_invalid_quantity_broadcast(self):
"""Check broadcasting rules in interactions with Quantity."""
t0 = TimeDelta(np.arange(12.0).reshape(4, 3), format="sec")
with pytest.raises(ValueError):
t0 + np.arange(4.0) * u.s
class TestDeltaAttributes:
def test_delta_ut1_utc(self):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc", precision=6)
t.delta_ut1_utc = 0.3 * u.s
assert t.ut1.iso == "2010-01-01 00:00:00.300000"
t.delta_ut1_utc = 0.4 / 60.0 * u.minute
assert t.ut1.iso == "2010-01-01 00:00:00.400000"
with pytest.raises(u.UnitsError):
t.delta_ut1_utc = 0.4 * u.m
# Also check that a TimeDelta works.
t.delta_ut1_utc = TimeDelta(0.3, format="sec")
assert t.ut1.iso == "2010-01-01 00:00:00.300000"
t.delta_ut1_utc = TimeDelta(0.5 / 24.0 / 3600.0, format="jd")
assert t.ut1.iso == "2010-01-01 00:00:00.500000"
def test_delta_tdb_tt(self):
t = Time("2010-01-01 00:00:00", format="iso", scale="tt", precision=6)
t.delta_tdb_tt = 20.0 * u.second
assert t.tdb.iso == "2010-01-01 00:00:20.000000"
t.delta_tdb_tt = 30.0 / 60.0 * u.minute
assert t.tdb.iso == "2010-01-01 00:00:30.000000"
with pytest.raises(u.UnitsError):
t.delta_tdb_tt = 0.4 * u.m
# Also check that a TimeDelta works.
t.delta_tdb_tt = TimeDelta(40.0, format="sec")
assert t.tdb.iso == "2010-01-01 00:00:40.000000"
t.delta_tdb_tt = TimeDelta(50.0 / 24.0 / 3600.0, format="jd")
assert t.tdb.iso == "2010-01-01 00:00:50.000000"
@pytest.mark.parametrize(
"q1, q2",
(
(5e8 * u.s, None),
(5e17 * u.ns, None),
(4e8 * u.s, 1e17 * u.ns),
(4e14 * u.us, 1e17 * u.ns),
),
)
def test_quantity_conversion_rounding(q1, q2):
"""Check that no rounding errors are incurred by unit conversion.
This occurred before as quantities in seconds were converted to days
before trying to split them into two-part doubles. See gh-7622.
"""
t = Time("2001-01-01T00:00:00.", scale="tai")
expected = Time("2016-11-05T00:53:20.", scale="tai")
if q2 is None:
t0 = t + q1
else:
t0 = t + q1 + q2
assert abs(t0 - expected) < 20 * u.ps
dt1 = TimeDelta(q1, q2)
t1 = t + dt1
assert abs(t1 - expected) < 20 * u.ps
dt2 = TimeDelta(q1, q2, format="sec")
t2 = t + dt2
assert abs(t2 - expected) < 20 * u.ps
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import itertools
import warnings
import numpy as np
import pytest
import astropy.units as u
from astropy.time import Time
from astropy.time.utils import day_frac
from astropy.units.quantity_helper.function_helpers import ARRAY_FUNCTION_ENABLED
from astropy.utils import iers
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
def assert_time_all_equal(t1, t2):
"""Checks equality of shape and content."""
assert t1.shape == t2.shape
assert np.all(t1 == t2)
class ShapeSetup:
def setup_class(cls):
mjd = np.arange(50000, 50010)
frac = np.arange(0.0, 0.999, 0.2)
frac_masked = np.ma.array(frac)
frac_masked[1] = np.ma.masked
cls.t0 = {
"not_masked": Time(mjd[:, np.newaxis] + frac, format="mjd", scale="utc"),
"masked": Time(mjd[:, np.newaxis] + frac_masked, format="mjd", scale="utc"),
}
cls.t1 = {
"not_masked": Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=("45d", "50d"),
),
"masked": Time(
mjd[:, np.newaxis] + frac_masked,
format="mjd",
scale="utc",
location=("45d", "50d"),
),
}
cls.t2 = {
"not_masked": Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=(np.arange(len(frac)), np.arange(len(frac))),
),
"masked": Time(
mjd[:, np.newaxis] + frac_masked,
format="mjd",
scale="utc",
location=(np.arange(len(frac_masked)), np.arange(len(frac_masked))),
),
}
def create_data(self, use_mask):
self.t0 = self.__class__.t0[use_mask]
self.t1 = self.__class__.t1[use_mask]
self.t2 = self.__class__.t2[use_mask]
@pytest.mark.parametrize("use_mask", ("masked", "not_masked"))
class TestManipulation(ShapeSetup):
"""Manipulation of Time objects, ensuring attributes are done correctly."""
def test_ravel(self, use_mask):
self.create_data(use_mask)
t0_ravel = self.t0.ravel()
assert t0_ravel.shape == (self.t0.size,)
assert np.all(t0_ravel.jd1 == self.t0.jd1.ravel())
assert np.may_share_memory(t0_ravel.jd1, self.t0.jd1)
assert t0_ravel.location is None
t1_ravel = self.t1.ravel()
assert t1_ravel.shape == (self.t1.size,)
assert np.all(t1_ravel.jd1 == self.t1.jd1.ravel())
assert np.may_share_memory(t1_ravel.jd1, self.t1.jd1)
assert t1_ravel.location is self.t1.location
t2_ravel = self.t2.ravel()
assert t2_ravel.shape == (self.t2.size,)
assert np.all(t2_ravel.jd1 == self.t2.jd1.ravel())
assert np.may_share_memory(t2_ravel.jd1, self.t2.jd1)
assert t2_ravel.location.shape == t2_ravel.shape
# Broadcasting and ravelling cannot be done without a copy.
assert not np.may_share_memory(t2_ravel.location, self.t2.location)
def test_flatten(self, use_mask):
self.create_data(use_mask)
t0_flatten = self.t0.flatten()
assert t0_flatten.shape == (self.t0.size,)
assert t0_flatten.location is None
# Flatten always makes a copy.
assert not np.may_share_memory(t0_flatten.jd1, self.t0.jd1)
t1_flatten = self.t1.flatten()
assert t1_flatten.shape == (self.t1.size,)
assert not np.may_share_memory(t1_flatten.jd1, self.t1.jd1)
assert t1_flatten.location is not self.t1.location
assert t1_flatten.location == self.t1.location
t2_flatten = self.t2.flatten()
assert t2_flatten.shape == (self.t2.size,)
assert not np.may_share_memory(t2_flatten.jd1, self.t2.jd1)
assert t2_flatten.location.shape == t2_flatten.shape
assert not np.may_share_memory(t2_flatten.location, self.t2.location)
def test_transpose(self, use_mask):
self.create_data(use_mask)
t0_transpose = self.t0.transpose()
assert t0_transpose.shape == (5, 10)
assert np.all(t0_transpose.jd1 == self.t0.jd1.transpose())
assert np.may_share_memory(t0_transpose.jd1, self.t0.jd1)
assert t0_transpose.location is None
t1_transpose = self.t1.transpose()
assert t1_transpose.shape == (5, 10)
assert np.all(t1_transpose.jd1 == self.t1.jd1.transpose())
assert np.may_share_memory(t1_transpose.jd1, self.t1.jd1)
assert t1_transpose.location is self.t1.location
t2_transpose = self.t2.transpose()
assert t2_transpose.shape == (5, 10)
assert np.all(t2_transpose.jd1 == self.t2.jd1.transpose())
assert np.may_share_memory(t2_transpose.jd1, self.t2.jd1)
assert t2_transpose.location.shape == t2_transpose.shape
assert np.may_share_memory(t2_transpose.location, self.t2.location)
# Only one check on T, since it just calls transpose anyway.
t2_T = self.t2.T
assert t2_T.shape == (5, 10)
assert np.all(t2_T.jd1 == self.t2.jd1.T)
assert np.may_share_memory(t2_T.jd1, self.t2.jd1)
        assert t2_T.location.shape == t2_T.shape
assert np.may_share_memory(t2_T.location, self.t2.location)
def test_diagonal(self, use_mask):
self.create_data(use_mask)
t0_diagonal = self.t0.diagonal()
assert t0_diagonal.shape == (5,)
assert np.all(t0_diagonal.jd1 == self.t0.jd1.diagonal())
assert t0_diagonal.location is None
assert np.may_share_memory(t0_diagonal.jd1, self.t0.jd1)
t1_diagonal = self.t1.diagonal()
assert t1_diagonal.shape == (5,)
assert np.all(t1_diagonal.jd1 == self.t1.jd1.diagonal())
assert t1_diagonal.location is self.t1.location
assert np.may_share_memory(t1_diagonal.jd1, self.t1.jd1)
t2_diagonal = self.t2.diagonal()
assert t2_diagonal.shape == (5,)
assert np.all(t2_diagonal.jd1 == self.t2.jd1.diagonal())
assert t2_diagonal.location.shape == t2_diagonal.shape
assert np.may_share_memory(t2_diagonal.jd1, self.t2.jd1)
assert np.may_share_memory(t2_diagonal.location, self.t2.location)
def test_swapaxes(self, use_mask):
self.create_data(use_mask)
t0_swapaxes = self.t0.swapaxes(0, 1)
assert t0_swapaxes.shape == (5, 10)
assert np.all(t0_swapaxes.jd1 == self.t0.jd1.swapaxes(0, 1))
assert np.may_share_memory(t0_swapaxes.jd1, self.t0.jd1)
assert t0_swapaxes.location is None
t1_swapaxes = self.t1.swapaxes(0, 1)
assert t1_swapaxes.shape == (5, 10)
assert np.all(t1_swapaxes.jd1 == self.t1.jd1.swapaxes(0, 1))
assert np.may_share_memory(t1_swapaxes.jd1, self.t1.jd1)
assert t1_swapaxes.location is self.t1.location
t2_swapaxes = self.t2.swapaxes(0, 1)
assert t2_swapaxes.shape == (5, 10)
assert np.all(t2_swapaxes.jd1 == self.t2.jd1.swapaxes(0, 1))
assert np.may_share_memory(t2_swapaxes.jd1, self.t2.jd1)
assert t2_swapaxes.location.shape == t2_swapaxes.shape
assert np.may_share_memory(t2_swapaxes.location, self.t2.location)
def test_reshape(self, use_mask):
self.create_data(use_mask)
t0_reshape = self.t0.reshape(5, 2, 5)
assert t0_reshape.shape == (5, 2, 5)
assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
assert np.may_share_memory(t0_reshape.jd1, self.t0.jd1)
assert np.may_share_memory(t0_reshape.jd2, self.t0.jd2)
assert t0_reshape.location is None
t1_reshape = self.t1.reshape(2, 5, 5)
assert t1_reshape.shape == (2, 5, 5)
assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
assert np.may_share_memory(t1_reshape.jd1, self.t1.jd1)
assert t1_reshape.location is self.t1.location
# For reshape(5, 2, 5), the location array can remain the same.
t2_reshape = self.t2.reshape(5, 2, 5)
assert t2_reshape.shape == (5, 2, 5)
assert np.all(t2_reshape.jd1 == self.t2.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t2_reshape.jd1, self.t2.jd1)
assert t2_reshape.location.shape == t2_reshape.shape
assert np.may_share_memory(t2_reshape.location, self.t2.location)
# But for reshape(5, 5, 2), location has to be broadcast and copied.
t2_reshape2 = self.t2.reshape(5, 5, 2)
assert t2_reshape2.shape == (5, 5, 2)
assert np.all(t2_reshape2.jd1 == self.t2.jd1.reshape(5, 5, 2))
assert np.may_share_memory(t2_reshape2.jd1, self.t2.jd1)
assert t2_reshape2.location.shape == t2_reshape2.shape
assert not np.may_share_memory(t2_reshape2.location, self.t2.location)
t2_reshape_t = self.t2.reshape(10, 5).T
assert t2_reshape_t.shape == (5, 10)
assert np.may_share_memory(t2_reshape_t.jd1, self.t2.jd1)
assert t2_reshape_t.location.shape == t2_reshape_t.shape
assert np.may_share_memory(t2_reshape_t.location, self.t2.location)
# Finally, reshape in a way that cannot be a view.
t2_reshape_t_reshape = t2_reshape_t.reshape(10, 5)
assert t2_reshape_t_reshape.shape == (10, 5)
assert not np.may_share_memory(t2_reshape_t_reshape.jd1, self.t2.jd1)
assert t2_reshape_t_reshape.location.shape == t2_reshape_t_reshape.shape
assert not np.may_share_memory(
t2_reshape_t_reshape.location, t2_reshape_t.location
)
def test_squeeze(self, use_mask):
self.create_data(use_mask)
t0_squeeze = self.t0.reshape(5, 1, 2, 1, 5).squeeze()
assert t0_squeeze.shape == (5, 2, 5)
assert np.all(t0_squeeze.jd1 == self.t0.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t0_squeeze.jd1, self.t0.jd1)
assert t0_squeeze.location is None
t1_squeeze = self.t1.reshape(1, 5, 1, 2, 5).squeeze()
assert t1_squeeze.shape == (5, 2, 5)
assert np.all(t1_squeeze.jd1 == self.t1.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t1_squeeze.jd1, self.t1.jd1)
assert t1_squeeze.location is self.t1.location
t2_squeeze = self.t2.reshape(1, 1, 5, 2, 5, 1, 1).squeeze()
assert t2_squeeze.shape == (5, 2, 5)
assert np.all(t2_squeeze.jd1 == self.t2.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t2_squeeze.jd1, self.t2.jd1)
assert t2_squeeze.location.shape == t2_squeeze.shape
assert np.may_share_memory(t2_squeeze.location, self.t2.location)
def test_add_dimension(self, use_mask):
self.create_data(use_mask)
t0_adddim = self.t0[:, np.newaxis, :]
assert t0_adddim.shape == (10, 1, 5)
assert np.all(t0_adddim.jd1 == self.t0.jd1[:, np.newaxis, :])
assert np.may_share_memory(t0_adddim.jd1, self.t0.jd1)
assert t0_adddim.location is None
t1_adddim = self.t1[:, :, np.newaxis]
assert t1_adddim.shape == (10, 5, 1)
assert np.all(t1_adddim.jd1 == self.t1.jd1[:, :, np.newaxis])
assert np.may_share_memory(t1_adddim.jd1, self.t1.jd1)
assert t1_adddim.location is self.t1.location
t2_adddim = self.t2[:, :, np.newaxis]
assert t2_adddim.shape == (10, 5, 1)
assert np.all(t2_adddim.jd1 == self.t2.jd1[:, :, np.newaxis])
assert np.may_share_memory(t2_adddim.jd1, self.t2.jd1)
assert t2_adddim.location.shape == t2_adddim.shape
assert np.may_share_memory(t2_adddim.location, self.t2.location)
def test_take(self, use_mask):
self.create_data(use_mask)
t0_take = self.t0.take((5, 2))
assert t0_take.shape == (2,)
assert np.all(t0_take.jd1 == self.t0._time.jd1.take((5, 2)))
assert t0_take.location is None
t1_take = self.t1.take((2, 4), axis=1)
assert t1_take.shape == (10, 2)
assert np.all(t1_take.jd1 == self.t1.jd1.take((2, 4), axis=1))
assert t1_take.location is self.t1.location
t2_take = self.t2.take((1, 3, 7), axis=0)
assert t2_take.shape == (3, 5)
assert np.all(t2_take.jd1 == self.t2.jd1.take((1, 3, 7), axis=0))
assert t2_take.location.shape == t2_take.shape
t2_take2 = self.t2.take((5, 15))
assert t2_take2.shape == (2,)
assert np.all(t2_take2.jd1 == self.t2.jd1.take((5, 15)))
assert t2_take2.location.shape == t2_take2.shape
def test_broadcast_via_apply(self, use_mask):
"""Test using a callable method."""
self.create_data(use_mask)
t0_broadcast = self.t0._apply(np.broadcast_to, shape=(3, 10, 5))
assert t0_broadcast.shape == (3, 10, 5)
assert np.all(t0_broadcast.jd1 == self.t0.jd1)
assert np.may_share_memory(t0_broadcast.jd1, self.t0.jd1)
assert t0_broadcast.location is None
t1_broadcast = self.t1._apply(np.broadcast_to, shape=(3, 10, 5))
assert t1_broadcast.shape == (3, 10, 5)
assert np.all(t1_broadcast.jd1 == self.t1.jd1)
assert np.may_share_memory(t1_broadcast.jd1, self.t1.jd1)
assert t1_broadcast.location is self.t1.location
t2_broadcast = self.t2._apply(np.broadcast_to, shape=(3, 10, 5))
assert t2_broadcast.shape == (3, 10, 5)
assert np.all(t2_broadcast.jd1 == self.t2.jd1)
assert np.may_share_memory(t2_broadcast.jd1, self.t2.jd1)
assert t2_broadcast.location.shape == t2_broadcast.shape
assert np.may_share_memory(t2_broadcast.location, self.t2.location)
@pytest.mark.parametrize("use_mask", ("masked", "not_masked"))
class TestSetShape(ShapeSetup):
def test_shape_setting(self, use_mask):
# Shape-setting should be on the object itself, since copying removes
# zero-strides due to broadcasting. Hence, this should be the only
# test in this class.
self.create_data(use_mask)
t0_reshape = self.t0.copy()
mjd = t0_reshape.mjd # Creates a cache of the mjd attribute
t0_reshape.shape = (5, 2, 5)
assert t0_reshape.shape == (5, 2, 5)
assert mjd.shape != t0_reshape.mjd.shape # Cache got cleared
assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
assert t0_reshape.location is None
# But if the shape doesn't work, one should get an error.
t0_reshape_t = t0_reshape.T
with pytest.raises(ValueError):
t0_reshape_t.shape = (12,) # Wrong number of elements.
with pytest.raises(AttributeError):
t0_reshape_t.shape = (10, 5) # Cannot be done without copy.
# check no shape was changed.
assert t0_reshape_t.shape == t0_reshape.T.shape
assert t0_reshape_t.jd1.shape == t0_reshape.T.shape
assert t0_reshape_t.jd2.shape == t0_reshape.T.shape
t1_reshape = self.t1.copy()
t1_reshape.shape = (2, 5, 5)
assert t1_reshape.shape == (2, 5, 5)
assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
# location is a single element, so its shape should not change.
assert t1_reshape.location.shape == ()
# For reshape(5, 2, 5), the location array can remain the same.
# Note that we need to work directly on self.t2 here, since any
# copy would cause location to have the full shape.
self.t2.shape = (5, 2, 5)
assert self.t2.shape == (5, 2, 5)
assert self.t2.jd1.shape == (5, 2, 5)
assert self.t2.jd2.shape == (5, 2, 5)
assert self.t2.location.shape == (5, 2, 5)
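        # location was broadcast along the first two axes (hence the 0 strides);
        # 24 bytes is the itemsize of one (x, y, z) float64 location element.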
assert self.t2.location.strides == (0, 0, 24)
# But for reshape(50), location would need to be copied, so this
# should fail.
oldshape = self.t2.shape
with pytest.raises(AttributeError):
self.t2.shape = (50,)
# check no shape was changed.
assert self.t2.jd1.shape == oldshape
assert self.t2.jd2.shape == oldshape
assert self.t2.location.shape == oldshape
@pytest.mark.parametrize("use_mask", ("masked", "not_masked"))
class TestShapeFunctions(ShapeSetup):
@needs_array_function
def test_broadcast(self, use_mask):
"""Test as supported numpy function."""
self.create_data(use_mask)
t0_broadcast = np.broadcast_to(self.t0, shape=(3, 10, 5))
assert t0_broadcast.shape == (3, 10, 5)
assert np.all(t0_broadcast.jd1 == self.t0.jd1)
assert np.may_share_memory(t0_broadcast.jd1, self.t0.jd1)
assert t0_broadcast.location is None
t1_broadcast = np.broadcast_to(self.t1, shape=(3, 10, 5))
assert t1_broadcast.shape == (3, 10, 5)
assert np.all(t1_broadcast.jd1 == self.t1.jd1)
assert np.may_share_memory(t1_broadcast.jd1, self.t1.jd1)
assert t1_broadcast.location is self.t1.location
t2_broadcast = np.broadcast_to(self.t2, shape=(3, 10, 5))
assert t2_broadcast.shape == (3, 10, 5)
assert np.all(t2_broadcast.jd1 == self.t2.jd1)
assert np.may_share_memory(t2_broadcast.jd1, self.t2.jd1)
assert t2_broadcast.location.shape == t2_broadcast.shape
assert np.may_share_memory(t2_broadcast.location, self.t2.location)
@needs_array_function
def test_atleast_1d(self, use_mask):
self.create_data(use_mask)
t00 = self.t0.ravel()[0]
assert t00.ndim == 0
t00_1d = np.atleast_1d(t00)
assert t00_1d.ndim == 1
assert_time_all_equal(t00[np.newaxis], t00_1d)
# Actual jd1 will not share memory, as cast to scalar.
assert np.may_share_memory(t00_1d._time.jd1, t00._time.jd1)
@needs_array_function
def test_atleast_2d(self, use_mask):
self.create_data(use_mask)
t0r = self.t0.ravel()
assert t0r.ndim == 1
t0r_2d = np.atleast_2d(t0r)
assert t0r_2d.ndim == 2
assert_time_all_equal(t0r[np.newaxis], t0r_2d)
assert np.may_share_memory(t0r_2d.jd1, t0r.jd1)
@needs_array_function
def test_atleast_3d(self, use_mask):
self.create_data(use_mask)
assert self.t0.ndim == 2
t0_3d, t1_3d = np.atleast_3d(self.t0, self.t1)
assert t0_3d.ndim == t1_3d.ndim == 3
assert_time_all_equal(self.t0[:, :, np.newaxis], t0_3d)
assert_time_all_equal(self.t1[:, :, np.newaxis], t1_3d)
assert np.may_share_memory(t0_3d.jd2, self.t0.jd2)
def test_move_axis(self, use_mask):
# Goes via transpose so works without __array_function__ as well.
self.create_data(use_mask)
t0_10 = np.moveaxis(self.t0, 0, 1)
assert t0_10.shape == (self.t0.shape[1], self.t0.shape[0])
assert_time_all_equal(self.t0.T, t0_10)
assert np.may_share_memory(t0_10.jd1, self.t0.jd1)
def test_roll_axis(self, use_mask):
# Goes via transpose so works without __array_function__ as well.
self.create_data(use_mask)
t0_10 = np.rollaxis(self.t0, 1)
assert t0_10.shape == (self.t0.shape[1], self.t0.shape[0])
assert_time_all_equal(self.t0.T, t0_10)
assert np.may_share_memory(t0_10.jd1, self.t0.jd1)
@needs_array_function
def test_fliplr(self, use_mask):
self.create_data(use_mask)
t0_lr = np.fliplr(self.t0)
assert_time_all_equal(self.t0[:, ::-1], t0_lr)
assert np.may_share_memory(t0_lr.jd2, self.t0.jd2)
@needs_array_function
def test_rot90(self, use_mask):
self.create_data(use_mask)
t0_270 = np.rot90(self.t0, 3)
assert_time_all_equal(self.t0.T[:, ::-1], t0_270)
assert np.may_share_memory(t0_270.jd2, self.t0.jd2)
@needs_array_function
def test_roll(self, use_mask):
self.create_data(use_mask)
t0r = np.roll(self.t0, 1, axis=0)
assert_time_all_equal(t0r[1:], self.t0[:-1])
assert_time_all_equal(t0r[0], self.t0[-1])
@needs_array_function
def test_delete(self, use_mask):
self.create_data(use_mask)
t0d = np.delete(self.t0, [2, 3], axis=0)
assert_time_all_equal(t0d[:2], self.t0[:2])
assert_time_all_equal(t0d[2:], self.t0[4:])
@pytest.mark.parametrize("use_mask", ("masked", "not_masked"))
class TestArithmetic:
"""Arithmetic on Time objects, using both doubles."""
kwargs = ({}, {"axis": None}, {"axis": 0}, {"axis": 1}, {"axis": 2})
functions = ("min", "max", "sort")
def setup_class(cls):
mjd = np.arange(50000, 50100, 10).reshape(2, 5, 1)
frac = np.array([0.1, 0.1 + 1.0e-15, 0.1 - 1.0e-15, 0.9 + 2.0e-16, 0.9])
frac_masked = np.ma.array(frac)
frac_masked[1] = np.ma.masked
cls.t0 = {
"not_masked": Time(mjd, frac, format="mjd", scale="utc"),
"masked": Time(mjd, frac_masked, format="mjd", scale="utc"),
}
# Define arrays with same ordinal properties
frac = np.array([1, 2, 0, 4, 3])
frac_masked = np.ma.array(frac)
frac_masked[1] = np.ma.masked
cls.t1 = {
"not_masked": Time(mjd + frac, format="mjd", scale="utc"),
"masked": Time(mjd + frac_masked, format="mjd", scale="utc"),
}
cls.jd = {"not_masked": mjd + frac, "masked": mjd + frac_masked}
cls.t2 = {
"not_masked": Time(
mjd + frac,
format="mjd",
scale="utc",
location=(np.arange(len(frac)), np.arange(len(frac))),
),
"masked": Time(
mjd + frac_masked,
format="mjd",
scale="utc",
location=(np.arange(len(frac_masked)), np.arange(len(frac_masked))),
),
}
def create_data(self, use_mask):
self.t0 = self.__class__.t0[use_mask]
self.t1 = self.__class__.t1[use_mask]
self.t2 = self.__class__.t2[use_mask]
self.jd = self.__class__.jd[use_mask]
@pytest.mark.parametrize("kw, func", itertools.product(kwargs, functions))
def test_argfuncs(self, kw, func, use_mask):
"""
Test that ``np.argfunc(jd, **kw)`` is the same as ``t0.argfunc(**kw)``
where ``jd`` is a similarly shaped array with the same ordinal properties
but all integer values. Also test the same for t1 which has the same
integral values as jd.
"""
self.create_data(use_mask)
t0v = getattr(self.t0, "arg" + func)(**kw)
t1v = getattr(self.t1, "arg" + func)(**kw)
jdv = getattr(np, "arg" + func)(self.jd, **kw)
if self.t0.masked and kw == {"axis": None} and func == "sort":
t0v = np.ma.array(t0v, mask=self.t0.mask.reshape(t0v.shape)[t0v])
t1v = np.ma.array(t1v, mask=self.t1.mask.reshape(t1v.shape)[t1v])
jdv = np.ma.array(jdv, mask=self.jd.mask.reshape(jdv.shape)[jdv])
assert np.all(t0v == jdv)
assert np.all(t1v == jdv)
assert t0v.shape == jdv.shape
assert t1v.shape == jdv.shape
@pytest.mark.parametrize("kw, func", itertools.product(kwargs, functions))
def test_funcs(self, kw, func, use_mask):
"""
Test that ``np.func(jd, **kw)`` is the same as ``t1.func(**kw)`` where
        ``jd`` is a similarly shaped array with the same integral values.
"""
self.create_data(use_mask)
t1v = getattr(self.t1, func)(**kw)
jdv = getattr(np, func)(self.jd, **kw)
assert np.all(t1v.value == jdv)
assert t1v.shape == jdv.shape
def test_argmin(self, use_mask):
self.create_data(use_mask)
assert self.t0.argmin() == 2
assert np.all(self.t0.argmin(axis=0) == 0)
assert np.all(self.t0.argmin(axis=1) == 0)
assert np.all(self.t0.argmin(axis=2) == 2)
def test_argmax(self, use_mask):
self.create_data(use_mask)
assert self.t0.argmax() == self.t0.size - 2
if use_mask == "masked":
# The 0 is where all entries are masked in that axis
assert np.all(self.t0.argmax(axis=0) == [1, 0, 1, 1, 1])
assert np.all(self.t0.argmax(axis=1) == [4, 0, 4, 4, 4])
else:
assert np.all(self.t0.argmax(axis=0) == 1)
assert np.all(self.t0.argmax(axis=1) == 4)
assert np.all(self.t0.argmax(axis=2) == 3)
def test_argsort(self, use_mask):
self.create_data(use_mask)
order = [2, 0, 4, 3, 1] if use_mask == "masked" else [2, 0, 1, 4, 3]
assert np.all(self.t0.argsort() == np.array(order))
assert np.all(self.t0.argsort(axis=0) == np.arange(2).reshape(2, 1, 1))
assert np.all(self.t0.argsort(axis=1) == np.arange(5).reshape(5, 1))
assert np.all(self.t0.argsort(axis=2) == np.array(order))
ravel = np.arange(50).reshape(-1, 5)[:, order].ravel()
if use_mask == "masked":
t0v = self.t0.argsort(axis=None)
# Manually remove elements in ravel that correspond to masked
# entries in self.t0. This removes the 10 entries that are masked
# which show up at the end of the list.
mask = self.t0.mask.ravel()[ravel]
ravel = ravel[~mask]
assert np.all(t0v[:-10] == ravel)
else:
assert np.all(self.t0.argsort(axis=None) == ravel)
@pytest.mark.parametrize("scale", Time.SCALES)
def test_argsort_warning(self, use_mask, scale):
self.create_data(use_mask)
if scale == "utc":
pytest.xfail()
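            # Presumably because JD values this small fall far outside the range
            # where UTC is well defined, so ERFA emits "dubious year" warnings.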
with warnings.catch_warnings(record=True) as wlist:
Time([1, 2, 3], format="jd", scale=scale).argsort()
assert len(wlist) == 0
def test_min(self, use_mask):
self.create_data(use_mask)
assert self.t0.min() == self.t0[0, 0, 2]
assert np.all(self.t0.min(0) == self.t0[0])
assert np.all(self.t0.min(1) == self.t0[:, 0])
assert np.all(self.t0.min(2) == self.t0[:, :, 2])
assert self.t0.min(0).shape == (5, 5)
assert self.t0.min(0, keepdims=True).shape == (1, 5, 5)
assert self.t0.min(1).shape == (2, 5)
assert self.t0.min(1, keepdims=True).shape == (2, 1, 5)
assert self.t0.min(2).shape == (2, 5)
assert self.t0.min(2, keepdims=True).shape == (2, 5, 1)
def test_max(self, use_mask):
self.create_data(use_mask)
assert self.t0.max() == self.t0[-1, -1, -2]
assert np.all(self.t0.max(0) == self.t0[1])
assert np.all(self.t0.max(1) == self.t0[:, 4])
assert np.all(self.t0.max(2) == self.t0[:, :, 3])
assert self.t0.max(0).shape == (5, 5)
assert self.t0.max(0, keepdims=True).shape == (1, 5, 5)
def test_ptp(self, use_mask):
self.create_data(use_mask)
assert self.t0.ptp() == self.t0.max() - self.t0.min()
assert np.all(self.t0.ptp(0) == self.t0.max(0) - self.t0.min(0))
assert self.t0.ptp(0).shape == (5, 5)
assert self.t0.ptp(0, keepdims=True).shape == (1, 5, 5)
def test_sort(self, use_mask):
self.create_data(use_mask)
order = [2, 0, 4, 3, 1] if use_mask == "masked" else [2, 0, 1, 4, 3]
assert np.all(self.t0.sort() == self.t0[:, :, order])
assert np.all(self.t0.sort(0) == self.t0)
assert np.all(self.t0.sort(1) == self.t0)
assert np.all(self.t0.sort(2) == self.t0[:, :, order])
if use_mask == "not_masked":
assert np.all(self.t0.sort(None) == self.t0[:, :, order].ravel())
# Bit superfluous, but good to check.
assert np.all(self.t0.sort(-1)[:, :, 0] == self.t0.min(-1))
assert np.all(self.t0.sort(-1)[:, :, -1] == self.t0.max(-1))
@pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 1)])
@pytest.mark.parametrize(
"where", [True, np.array([True, False, True, True, False])[..., np.newaxis]]
)
@pytest.mark.parametrize("keepdims", [False, True])
def test_mean(self, use_mask, axis, where, keepdims):
self.create_data(use_mask)
kwargs = dict(axis=axis, where=where, keepdims=keepdims)
def is_consistent(time):
where_expected = where & ~time.mask
where_expected = np.broadcast_to(where_expected, time.shape)
kw = kwargs.copy()
kw["where"] = where_expected
divisor = where_expected.sum(axis=axis, keepdims=keepdims)
if np.any(divisor == 0):
with pytest.raises(ValueError):
time.mean(**kwargs)
else:
time_mean = time.mean(**kwargs)
time_expected = Time(
*day_frac(
val1=np.ma.getdata(time.tai.jd1).sum(**kw),
val2=np.ma.getdata(time.tai.jd2).sum(**kw),
divisor=divisor,
),
format="jd",
scale="tai",
)
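                # Convert the TAI-based expectation back to the original scale
                # in place, so it can be compared directly with time.mean().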
time_expected._set_scale(time.scale)
assert np.all(time_mean == time_expected)
is_consistent(self.t0)
is_consistent(self.t1)
axes_location_not_constant = [None, 2]
if axis in axes_location_not_constant:
with pytest.raises(ValueError):
self.t2.mean(**kwargs)
else:
is_consistent(self.t2)
def test_mean_precision(self, use_mask):
scale = "tai"
epsilon = 1 * u.ns
t0 = Time("2021-07-27T00:00:00", scale=scale)
t1 = Time("2022-07-27T00:00:00", scale=scale)
t2 = Time("2023-07-27T00:00:00", scale=scale)
t = Time([t0, t2 + epsilon])
if use_mask == "masked":
t[0] = np.ma.masked
assert t.mean() == (t2 + epsilon)
else:
assert t.mean() == (t1 + epsilon / 2)
def test_mean_dtype(self, use_mask):
self.create_data(use_mask)
with pytest.raises(ValueError):
self.t0.mean(dtype=int)
def test_mean_out(self, use_mask):
self.create_data(use_mask)
with pytest.raises(ValueError):
self.t0.mean(out=Time(np.zeros_like(self.t0.jd1), format="jd"))
def test_mean_leap_second(self, use_mask):
# Check that leap second is dealt with correctly: for UTC, across a leap
# second boundary, one cannot just average jd, but has to go through TAI.
if use_mask == "not_masked":
t = Time(["2012-06-30 23:59:60.000", "2012-07-01 00:00:01.000"])
mean_expected = t[0] + (t[1] - t[0]) / 2
mean_expected_explicit = Time("2012-07-01 00:00:00")
mean_test = t.mean()
assert mean_expected == mean_expected_explicit
assert mean_expected == mean_test
assert mean_test != Time(
*day_frac(t.jd1.sum(), t.jd2.sum(), divisor=2), format="jd"
)
def test_regression():
# For #5225, where a time with a single-element delta_ut1_utc could not
# be copied, flattened, or ravelled. (For copy, it is in test_basic.)
with iers.conf.set_temp("auto_download", False):
t = Time(49580.0, scale="tai", format="mjd")
t_ut1 = t.ut1
t_ut1_copy = copy.deepcopy(t_ut1)
assert type(t_ut1_copy.delta_ut1_utc) is np.ndarray
t_ut1_flatten = t_ut1.flatten()
assert type(t_ut1_flatten.delta_ut1_utc) is np.ndarray
t_ut1_ravel = t_ut1.ravel()
assert type(t_ut1_ravel.delta_ut1_utc) is np.ndarray
assert t_ut1_copy.delta_ut1_utc == t_ut1.delta_ut1_utc
# Licensed under a 3-clause BSD style license. See LICENSE.rst except
# for parts explicitly labelled as being (largely) copies of numpy
# implementations; for those, see licenses/NUMPY_LICENSE.rst.
"""Helpers for overriding numpy functions.
We override numpy functions in `~astropy.units.Quantity.__array_function__`.
In this module, the numpy functions are split in four groups, each of
which has an associated `set` or `dict`:
1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
supports Quantity; we pass on to ndarray.__array_function__.
2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
after converting quantities to arrays with suitable units,
and possibly setting units on the result.
3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
requires a Quantity-specific implementation
4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
For the FUNCTION_HELPERS `dict`, the value is a function that does the
unit conversion. It should take the same arguments as the numpy
function would (though one can use ``*args`` and ``**kwargs``) and
return a tuple of ``args, kwargs, unit, out``, where ``args`` and
``kwargs`` will be passed on to the numpy implementation,
``unit`` is a possible unit of the result (`None` if it should not be
converted to Quantity), and ``out`` is a possible output Quantity passed
in, which will be filled in-place.
For the DISPATCHED_FUNCTIONS `dict`, the value is a function that
implements the numpy functionality for Quantity input. It should
return a tuple of ``result, unit, out``, where ``result`` is generally
a plain array with the result, and ``unit`` and ``out`` are as above.
If unit is `None`, result gets returned directly, so one can also
return a Quantity directly using ``quantity_result, None, None``.
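For illustration, a minimal FUNCTION_HELPERS entry (with a hypothetical
function name) would look like::

    @function_helper
    def some_numpy_function(a, *args, **kwargs):
        return (a.view(np.ndarray),) + args, kwargs, a.unit, None

See, e.g., ``invariant_a_helper`` below for a real instance of this pattern.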
"""
import functools
import operator
import numpy as np
from numpy.lib import recfunctions as rfn
from astropy.units.core import (
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
from astropy.utils import isiterable
from astropy.utils.compat import NUMPY_LT_1_23
# In numpy 1.17, overrides are enabled by default, but it is still possible to
# turn them off using an environment variable. We use getattr since it
# is planned to remove that possibility in later numpy versions.
ARRAY_FUNCTION_ENABLED = getattr(np.core.overrides, "ENABLE_ARRAY_FUNCTION", True)
SUBCLASS_SAFE_FUNCTIONS = set()
"""Functions with implementations supporting subclasses like Quantity."""
FUNCTION_HELPERS = {}
"""Functions with implementations usable with proper unit conversion."""
DISPATCHED_FUNCTIONS = {}
"""Functions for which we provide our own implementation."""
UNSUPPORTED_FUNCTIONS = set()
"""Functions that cannot sensibly be used with quantities."""
SUBCLASS_SAFE_FUNCTIONS |= {
np.shape, np.size, np.ndim,
np.reshape, np.ravel, np.moveaxis, np.rollaxis, np.swapaxes,
np.transpose, np.atleast_1d, np.atleast_2d, np.atleast_3d,
np.expand_dims, np.squeeze, np.broadcast_to, np.broadcast_arrays,
np.flip, np.fliplr, np.flipud, np.rot90,
np.argmin, np.argmax, np.argsort, np.lexsort, np.searchsorted,
np.nonzero, np.argwhere, np.flatnonzero,
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.real, np.imag, np.diagonal, np.diagflat, np.empty_like,
np.compress, np.extract, np.delete, np.trim_zeros, np.roll, np.take,
np.put, np.fill_diagonal, np.tile, np.repeat,
np.split, np.array_split, np.hsplit, np.vsplit, np.dsplit,
np.stack, np.column_stack, np.hstack, np.vstack, np.dstack,
np.amax, np.amin, np.ptp, np.sum, np.cumsum,
np.prod, np.product, np.cumprod, np.cumproduct,
np.round, np.around,
np.fix, np.angle, np.i0, np.clip,
np.isposinf, np.isneginf, np.isreal, np.iscomplex,
np.average, np.mean, np.std, np.var, np.median, np.trace,
np.nanmax, np.nanmin, np.nanargmin, np.nanargmax, np.nanmean,
np.nanmedian, np.nansum, np.nancumsum, np.nanstd, np.nanvar,
np.nanprod, np.nancumprod,
np.einsum_path, np.trapz, np.linspace,
np.sort, np.msort, np.partition, np.meshgrid,
np.common_type, np.result_type, np.can_cast, np.min_scalar_type,
np.iscomplexobj, np.isrealobj,
np.shares_memory, np.may_share_memory,
np.apply_along_axis, np.take_along_axis, np.put_along_axis,
np.linalg.cond, np.linalg.multi_dot,
} # fmt: skip
# Implemented as methods on Quantity:
# np.ediff1d is from setops, but we support it anyway; the others
# currently return NotImplementedError.
# TODO: move latter to UNSUPPORTED? Would raise TypeError instead.
SUBCLASS_SAFE_FUNCTIONS |= {np.ediff1d}
UNSUPPORTED_FUNCTIONS |= {
np.packbits, np.unpackbits, np.unravel_index,
np.ravel_multi_index, np.ix_, np.cov, np.corrcoef,
np.busday_count, np.busday_offset, np.datetime_as_string,
np.is_busday, np.all, np.any, np.sometrue, np.alltrue,
} # fmt: skip
# Could be supported if we had a natural logarithm unit.
UNSUPPORTED_FUNCTIONS |= {np.linalg.slogdet}
TBD_FUNCTIONS = {
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.find_duplicates, rfn.recursive_fill_fields, rfn.require_fields,
rfn.repack_fields, rfn.stack_arrays,
} # fmt: skip
UNSUPPORTED_FUNCTIONS |= TBD_FUNCTIONS
IGNORED_FUNCTIONS = {
# I/O - useless for Quantity, since no way to store the unit.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
# functions taking record arrays (which are deprecated)
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
} # fmt: skip
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
class FunctionAssigner:
def __init__(self, assignments):
self.assignments = assignments
def __call__(self, f=None, helps=None, module=np):
"""Add a helper to a numpy function.
Normally used as a decorator.
If ``helps`` is given, it should be the numpy function helped (or an
iterable of numpy functions helped).
If ``helps`` is not given, it is assumed the function helped is the
numpy function with the same name as the decorated function.
"""
if f is not None:
if helps is None:
helps = getattr(module, f.__name__)
if not isiterable(helps):
helps = (helps,)
for h in helps:
self.assignments[h] = f
return f
elif helps is not None or module is not np:
return functools.partial(self.__call__, helps=helps, module=module)
else: # pragma: no cover
raise ValueError("function_helper requires at least one argument.")
function_helper = FunctionAssigner(FUNCTION_HELPERS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
# fmt: off
@function_helper(
helps={
np.copy, np.asfarray, np.real_if_close, np.sort_complex, np.resize,
np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft,
np.fft.fft2, np.fft.ifft2, np.fft.rfft2, np.fft.irfft2,
np.fft.fftn, np.fft.ifftn, np.fft.rfftn, np.fft.irfftn,
np.fft.hfft, np.fft.ihfft,
np.linalg.eigvals, np.linalg.eigvalsh,
}
)
# fmt: on
def invariant_a_helper(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, a.unit, None
@function_helper(helps={np.tril, np.triu})
def invariant_m_helper(m, *args, **kwargs):
return (m.view(np.ndarray),) + args, kwargs, m.unit, None
@function_helper(helps={np.fft.fftshift, np.fft.ifftshift})
def invariant_x_helper(x, *args, **kwargs):
return (x.view(np.ndarray),) + args, kwargs, x.unit, None
# Note that ones_like does *not* work by default since if one creates an empty
# array with a unit, one cannot just fill it with unity. Indeed, in this
# respect, it is a bit of an odd function for Quantity. On the other hand, it
# matches the idea that a unit is the same as the quantity with that unit and
# value of 1. Also, it used to work without __array_function__.
# zeros_like does work by default for regular quantities, because numpy first
# creates an empty array with the unit and then fills it with 0 (which can have
# any unit), but for structured dtype this fails (0 cannot have an arbitrary
# structured unit), so we include it here too.
@function_helper(helps={np.ones_like, np.zeros_like})
def like_helper(a, *args, **kwargs):
subok = args[2] if len(args) > 2 else kwargs.pop("subok", True)
unit = a.unit if subok else None
return (a.view(np.ndarray),) + args, kwargs, unit, None
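# For example, ``np.zeros_like(np.arange(3.) * u.m)`` then comes out as a
# Quantity of zeros in meters rather than as a bare ndarray.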
@function_helper
def sinc(x):
from astropy.units.si import radian
try:
x = x.to_value(radian)
except UnitsError:
raise UnitTypeError(
"Can only apply 'sinc' function to quantities with angle units"
)
return (x,), {}, dimensionless_unscaled, None
@dispatched_function
def unwrap(p, discont=None, axis=-1):
from astropy.units.si import radian
if discont is None:
discont = np.pi << radian
p, discont = _as_quantities(p, discont)
result = np.unwrap.__wrapped__(
p.to_value(radian), discont.to_value(radian), axis=axis
)
result = radian.to(p.unit, result)
return result, p.unit, None
@function_helper
def argpartition(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, None, None
@function_helper
def full_like(a, fill_value, *args, **kwargs):
unit = a.unit if kwargs.get("subok", True) else None
return (a.view(np.ndarray), a._to_own_unit(fill_value)) + args, kwargs, unit, None
@function_helper
def putmask(a, mask, values):
from astropy.units import Quantity
if isinstance(a, Quantity):
return (a.view(np.ndarray), mask, a._to_own_unit(values)), {}, a.unit, None
elif isinstance(values, Quantity):
return (a, mask, values.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def place(arr, mask, vals):
from astropy.units import Quantity
if isinstance(arr, Quantity):
return (arr.view(np.ndarray), mask, arr._to_own_unit(vals)), {}, arr.unit, None
elif isinstance(vals, Quantity):
return (arr, mask, vals.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def copyto(dst, src, *args, **kwargs):
from astropy.units import Quantity
if isinstance(dst, Quantity):
return (dst.view(np.ndarray), dst._to_own_unit(src)) + args, kwargs, None, None
elif isinstance(src, Quantity):
return (dst, src.to_value(dimensionless_unscaled)) + args, kwargs, None, None
else:
raise NotImplementedError
@function_helper
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
nan = x._to_own_unit(nan)
if posinf is not None:
posinf = x._to_own_unit(posinf)
if neginf is not None:
neginf = x._to_own_unit(neginf)
return (
(x.view(np.ndarray),),
        dict(copy=copy, nan=nan, posinf=posinf, neginf=neginf),
x.unit,
None,
)
def _as_quantity(a):
"""Convert argument to a Quantity (or raise NotImplementedError)."""
from astropy.units import Quantity
try:
return Quantity(a, copy=False, subok=True)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _as_quantities(*args):
"""Convert arguments to Quantity (or raise NotImplentedError)."""
from astropy.units import Quantity
try:
# Note: this should keep the dtype the same
return tuple(Quantity(a, copy=False, subok=True, dtype=None) for a in args)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _quantities2arrays(*args, unit_from_first=False):
"""Convert to arrays in units of the first argument that has a unit.
If unit_from_first, take the unit of the first argument regardless
whether it actually defined a unit (e.g., dimensionless for arrays).
"""
# Turn first argument into a quantity.
q = _as_quantity(args[0])
if len(args) == 1:
return (q.value,), q.unit
# If we care about the unit being explicit, then check whether this
# argument actually had a unit, or was likely inferred.
if not unit_from_first and (
q.unit is q._default_unit and not hasattr(args[0], "unit")
):
        # Here, the argument could still be something like [10*u.one, 11.*u.one],
        # i.e., properly dimensionless.  So, we only override with anything
        # that has a unit not equivalent to dimensionless (it is fine to let
        # other dimensionless units pass, even if explicitly given).
for arg in args[1:]:
trial = _as_quantity(arg)
if not trial.unit.is_equivalent(q.unit):
# Use any explicit unit not equivalent to dimensionless.
q = trial
break
# We use the private _to_own_unit method here instead of just
# converting everything to quantity and then do .to_value(qs0.unit)
# as we want to allow arbitrary unit for 0, inf, and nan.
try:
arrays = tuple((q._to_own_unit(arg)) for arg in args)
except TypeError:
raise NotImplementedError
return arrays, q.unit
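# Illustrative sketch of the coercion performed by _quantities2arrays
# (hypothetical values, nothing here is executed):
#
#     _quantities2arrays(1.0 * u.km, 500.0 * u.m)
#     # -> ((1.0, 0.5), Unit("km"))
#
# i.e., everything is expressed in the unit of the first argument that
# carries an explicit unit, and that unit is returned alongside the values.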
def _iterable_helper(*args, out=None, **kwargs):
"""Convert arguments to Quantity, and treat possible 'out'."""
from astropy.units import Quantity
if out is not None:
if isinstance(out, Quantity):
kwargs["out"] = out.view(np.ndarray)
else:
# TODO: for an ndarray output, we could in principle
# try converting all Quantity to dimensionless.
raise NotImplementedError
arrays, unit = _quantities2arrays(*args)
return arrays, kwargs, unit, out
@function_helper
def concatenate(arrays, axis=0, out=None, **kwargs):
# TODO: make this smarter by creating an appropriately shaped
# empty output array and just filling it.
arrays, kwargs, unit, out = _iterable_helper(*arrays, out=out, axis=axis, **kwargs)
return (arrays,), kwargs, unit, out
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
(arrays, list_ndim, result_ndim, final_size) = np.core.shape_base._block_setup(
arrays
)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim
)
# Here, one line of difference!
arrays, unit = _quantities2arrays(*arrays)
# Back to _block_slicing
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags["F_CONTIGUOUS"] for arr in arrays)
C_order = all(arr.flags["C_CONTIGUOUS"] for arr in arrays)
order = "F" if F_order and not C_order else "C"
result = np.empty(shape=shape, dtype=dtype, order=order)
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result, unit, None
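# Illustrative sketch for the block() override above (not executed here):
#
#     np.block([[1.0, 2.0] * u.m, [300.0, 400.0] * u.cm])
#     # -> Quantity [1., 2., 3., 4.] in m
#
# because _quantities2arrays converts every leaf to the unit of the first
# one before the slices of the result array are filled.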
@function_helper
def choose(a, choices, out=None, **kwargs):
choices, kwargs, unit, out = _iterable_helper(*choices, out=out, **kwargs)
return (a, choices), kwargs, unit, out
@function_helper
def select(condlist, choicelist, default=0):
choicelist, kwargs, unit, out = _iterable_helper(*choicelist)
if default != 0:
default = (1 * unit)._to_own_unit(default)
return (condlist, choicelist, default), kwargs, unit, out
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
from astropy.units import Quantity
# Copied implementation from numpy.lib.function_base.piecewise,
# taking care of units of function outputs.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0
):
condlist = [condlist]
if any(isinstance(c, Quantity) for c in condlist):
raise NotImplementedError
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
y = np.zeros(x.shape, x.dtype)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
what, unit = _quantities2arrays(*what)
for item, value in zip(where, what):
y[item] = value
return y, unit, None
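# Illustrative sketch for the piecewise() override above (not executed here):
#
#     x = np.linspace(-1.0, 1.0, 5) * u.m
#     np.piecewise(x, [x < 0], [lambda v: -v, lambda v: v])
#     # -> |x| as a Quantity in m
#
# Each callable is evaluated on the Quantity pieces; _quantities2arrays then
# brings all outputs to a single common unit before filling the result.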
@function_helper
def append(arr, values, *args, **kwargs):
arrays, unit = _quantities2arrays(arr, values, unit_from_first=True)
return arrays + args, kwargs, unit, None
@function_helper
def insert(arr, obj, values, *args, **kwargs):
from astropy.units import Quantity
if isinstance(obj, Quantity):
raise NotImplementedError
(arr, values), unit = _quantities2arrays(arr, values, unit_from_first=True)
return (arr, obj, values) + args, kwargs, unit, None
@function_helper
def pad(array, pad_width, mode="constant", **kwargs):
# pad dispatches only on array, so that must be a Quantity.
for key in "constant_values", "end_values":
value = kwargs.pop(key, None)
if value is None:
continue
if not isinstance(value, tuple):
value = (value,)
new_value = []
for v in value:
new_value.append(
tuple(array._to_own_unit(_v) for _v in v)
if isinstance(v, tuple)
else array._to_own_unit(v)
)
kwargs[key] = new_value
return (array.view(np.ndarray), pad_width, mode), kwargs, array.unit, None
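# Illustrative sketch for the pad() helper above (not executed here):
#
#     q = [1.0, 2.0] * u.m
#     np.pad(q, (1, 1), mode="constant", constant_values=50 * u.cm)
#     # -> Quantity [0.5, 1., 2., 0.5] in m
#
# i.e., the padding values are converted to the unit of the padded array.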
@function_helper
def where(condition, *args):
from astropy.units import Quantity
if isinstance(condition, Quantity) or len(args) != 2:
raise NotImplementedError
args, unit = _quantities2arrays(*args)
return (condition,) + args, {}, unit, None
@function_helper(helps={np.quantile, np.nanquantile})
def quantile(a, q, *args, _q_unit=dimensionless_unscaled, **kwargs):
if len(args) >= 2:
out = args[1]
args = args[:1] + args[2:]
else:
out = kwargs.pop("out", None)
from astropy.units import Quantity
if isinstance(q, Quantity):
q = q.to_value(_q_unit)
(a,), kwargs, unit, out = _iterable_helper(a, out=out, **kwargs)
return (a, q) + args, kwargs, unit, out
@function_helper(helps={np.percentile, np.nanpercentile})
def percentile(a, q, *args, **kwargs):
from astropy.units import percent
return quantile(a, q, *args, _q_unit=percent, **kwargs)
@function_helper
def count_nonzero(a, *args, **kwargs):
return (a.value,) + args, kwargs, None, None
@function_helper(helps={np.isclose, np.allclose})
def close(a, b, rtol=1e-05, atol=1e-08, *args, **kwargs):
from astropy.units import Quantity
(a, b), unit = _quantities2arrays(a, b, unit_from_first=True)
# Allow number without a unit as having the unit.
atol = Quantity(atol, unit).value
return (a, b, rtol, atol) + args, kwargs, None, None
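# Illustrative sketch for the isclose/allclose helper above (not executed
# here): a unitless ``atol`` is interpreted in the unit of the first argument,
#
#     np.isclose(100.0 * u.cm, 1.0 * u.m, atol=1e-8)
#
# compares 100 cm with 100 cm using an absolute tolerance of 1e-8 cm.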
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
try:
args, unit = _quantities2arrays(a1, a2)
except UnitConversionError:
return False, None, None
return np.array_equal(*args, equal_nan=equal_nan), None, None
@dispatched_function
def array_equiv(a1, a2):
try:
args, unit = _quantities2arrays(a1, a2)
except UnitConversionError:
return False, None, None
return np.array_equiv(*args), None, None
@function_helper(helps={np.dot, np.outer})
def dot_like(a, b, out=None):
from astropy.units import Quantity
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
return tuple(x.view(np.ndarray) for x in (a, b, out)), {}, unit, out
else:
return (a.view(np.ndarray), b.view(np.ndarray)), {}, unit, None
@function_helper(
helps={
np.cross,
np.inner,
np.vdot,
np.tensordot,
np.kron,
np.correlate,
np.convolve,
}
)
def cross_like(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
return (a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs, unit, None
@function_helper
def einsum(subscripts, *operands, out=None, **kwargs):
from astropy.units import Quantity
if not isinstance(subscripts, str):
raise ValueError('only "subscripts" string mode supported for einsum.')
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
else:
kwargs["out"] = out.view(np.ndarray)
qs = _as_quantities(*operands)
unit = functools.reduce(operator.mul, (q.unit for q in qs), dimensionless_unscaled)
arrays = tuple(q.view(np.ndarray) for q in qs)
return (subscripts,) + arrays, kwargs, unit, out
@function_helper
def bincount(x, weights=None, minlength=0):
from astropy.units import Quantity
if isinstance(x, Quantity):
raise NotImplementedError
return (x, weights.value, minlength), {}, weights.unit, None
@function_helper
def digitize(x, bins, *args, **kwargs):
arrays, unit = _quantities2arrays(x, bins, unit_from_first=True)
return arrays + args, kwargs, None, None
def _check_bins(bins, unit):
from astropy.units import Quantity
check = _as_quantity(bins)
if check.ndim > 0:
return check.to_value(unit)
elif isinstance(bins, Quantity):
# bins should be an integer (or at least definitely not a Quantity).
raise NotImplementedError
else:
return bins
@function_helper
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if density:
unit = (unit or 1) / a.unit
return (
(a.value, bins, range),
{"weights": weights, "density": density},
(unit, a.unit),
None,
)
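# Illustrative sketch for the histogram() helper above (not executed here):
# the two-element unit returned means the counts and the bin edges get
# separate units,
#
#     hist, edges = np.histogram([1.0, 2.0, 3.0] * u.m, bins=3, density=True)
#     # hist has unit 1 / m, edges have unit m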
@function_helper(helps=np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
# weights is currently unused
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
return (a.value, bins, range, weights), {}, a.unit, None
@function_helper
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
from astropy.units import Quantity
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
x, y = _as_quantities(x, y)
try:
n = len(bins)
except TypeError:
# bins should be an integer (or at least definitely not a Quantity).
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if n == 1:
raise NotImplementedError
elif n == 2 and not isinstance(bins, Quantity):
bins = [_check_bins(b, unit) for (b, unit) in zip(bins, (x.unit, y.unit))]
else:
bins = _check_bins(bins, x.unit)
y = y.to(x.unit)
if density:
unit = (unit or 1) / x.unit / y.unit
return (
(x.value, y.value, bins, range),
{"weights": weights, "density": density},
(unit, x.unit, y.unit),
None,
)
@function_helper
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
try:
# Sample is an ND-array.
_, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = _as_quantities(*sample)
sample_units = [s.unit for s in sample]
sample = [s.value for s in sample]
D = len(sample)
else:
sample = _as_quantity(sample)
sample_units = [sample.unit] * D
try:
M = len(bins)
except TypeError:
# bins should be an integer
from astropy.units import Quantity
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if M != D:
raise ValueError(
"The dimension of bins must be equal to the dimension of the sample x."
)
bins = [_check_bins(b, unit) for (b, unit) in zip(bins, sample_units)]
if density:
unit = functools.reduce(operator.truediv, sample_units, (unit or 1))
return (
(sample, bins, range),
{"weights": weights, "density": density},
(unit, sample_units),
None,
)
@function_helper
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = _as_quantity(a)
if prepend is not np._NoValue:
prepend = _as_quantity(prepend).to_value(a.unit)
if append is not np._NoValue:
append = _as_quantity(append).to_value(a.unit)
return (a.value, n, axis, prepend, append), {}, a.unit, None
@function_helper
def gradient(f, *varargs, **kwargs):
f = _as_quantity(f)
axis = kwargs.get("axis", None)
if axis is None:
n_axis = f.ndim
elif isinstance(axis, tuple):
n_axis = len(axis)
else:
n_axis = 1
if varargs:
varargs = _as_quantities(*varargs)
if len(varargs) == 1 and n_axis > 1:
varargs = varargs * n_axis
if varargs:
units = [f.unit / q.unit for q in varargs]
varargs = tuple(q.value for q in varargs)
else:
units = [f.unit] * n_axis
if len(units) == 1:
units = units[0]
return (f.value,) + varargs, kwargs, units, None
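# Illustrative sketch for the gradient() helper above (not executed here):
#
#     f = [0.0, 1.0, 4.0] * u.m
#     np.gradient(f, 2.0 * u.s)
#     # -> derivative as a Quantity in m / s
#
# since each spacing argument divides the unit of ``f`` along its axis.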
@function_helper
def logspace(start, stop, *args, **kwargs):
from astropy.units import LogQuantity, dex
if not isinstance(start, LogQuantity) or not isinstance(stop, LogQuantity):
raise NotImplementedError
# Get unit from end point as for linspace.
stop = stop.to(dex(stop.unit.physical_unit))
start = start.to(stop.unit)
unit = stop.unit.physical_unit
return (start.value, stop.value) + args, kwargs, unit, None
@function_helper
def geomspace(start, stop, *args, **kwargs):
# Get unit from end point as for linspace.
(stop, start), unit = _quantities2arrays(stop, start)
return (start, stop) + args, kwargs, unit, None
@function_helper
def interp(x, xp, fp, *args, **kwargs):
from astropy.units import Quantity
(x, xp), _ = _quantities2arrays(x, xp)
if isinstance(fp, Quantity):
unit = fp.unit
fp = fp.value
else:
unit = None
return (x, xp, fp) + args, kwargs, unit, None
@function_helper
def unique(
ar, return_index=False, return_inverse=False, return_counts=False, axis=None
):
unit = ar.unit
n_index = sum(bool(i) for i in (return_index, return_inverse, return_counts))
if n_index:
unit = [unit] + n_index * [None]
return (ar.value, return_index, return_inverse, return_counts, axis), {}, unit, None
@function_helper
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
if return_indices:
unit = [unit, None, None]
return (ar1, ar2, assume_unique, return_indices), {}, unit, None
@function_helper(helps=(np.setxor1d, np.union1d, np.setdiff1d))
def twosetop(ar1, ar2, *args, **kwargs):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
return (ar1, ar2) + args, kwargs, unit, None
@function_helper(helps=(np.isin, np.in1d))
def setcheckop(ar1, ar2, *args, **kwargs):
# This tests whether ar1 is in ar2, so we should change the unit of
# a1 to that of a2.
(ar2, ar1), unit = _quantities2arrays(ar2, ar1)
return (ar1, ar2) + args, kwargs, None, None
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
    # val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is a Quantity.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError(
"function is not returning an array of the correct shape"
)
# Returning unit is None to signal nothing should happen to
# the output.
return val, None, None
@dispatched_function
def array_repr(arr, *args, **kwargs):
# TODO: The addition of "unit='...'" doesn't worry about line
# length. Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
cls_name = arr.__class__.__name__
fake_name = "_" * len(cls_name)
fake_cls = type(fake_name, (np.ndarray,), {})
no_unit = np.array_repr(arr.view(fake_cls), *args, **kwargs).replace(
fake_name, cls_name
)
unit_part = f"unit='{arr.unit}'"
pre, dtype, post = no_unit.rpartition("dtype")
if dtype:
return f"{pre}{unit_part}, {dtype}{post}", None, None
else:
return f"{no_unit[:-1]}, {unit_part})", None, None
@dispatched_function
def array_str(arr, *args, **kwargs):
# TODO: The addition of the unit doesn't worry about line length.
# Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
no_unit = np.array_str(arr.value, *args, **kwargs)
return no_unit + arr._unitstr, None, None
@function_helper
def array2string(a, *args, **kwargs):
# array2string breaks on quantities as it tries to turn individual
# items into float, which works only for dimensionless. Since the
# defaults would not keep any unit anyway, this is rather pointless -
# we're better off just passing on the array view. However, one can
# also work around this by passing on a formatter (as is done in Angle).
# So, we do nothing if the formatter argument is present and has the
# relevant formatter for our dtype.
formatter = args[6] if len(args) >= 7 else kwargs.get("formatter", None)
if formatter is None:
a = a.value
else:
# See whether it covers our dtype.
from numpy.core.arrayprint import _get_format_function
with np.printoptions(formatter=formatter) as options:
try:
ff = _get_format_function(a.value, **options)
except Exception:
# Shouldn't happen, but possibly we're just not being smart
# enough, so let's pass things on as is.
pass
else:
# If the selected format function is that of numpy, we know
# things will fail
if "numpy" in ff.__module__:
a = a.value
return (a,) + args, kwargs, None, None
@function_helper
def diag(v, *args, **kwargs):
# Function works for *getting* the diagonal, but not *setting*.
# So, override always.
return (v.value,) + args, kwargs, v.unit, None
@function_helper(module=np.linalg)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
unit = a.unit
if compute_uv:
unit = (None, unit, None)
return ((a.view(np.ndarray), full_matrices, compute_uv, hermitian), {}, unit, None)
def _interpret_tol(tol, unit):
from astropy.units import Quantity
return Quantity(tol, unit).value
@function_helper(module=np.linalg)
def matrix_rank(M, tol=None, *args, **kwargs):
if tol is not None:
tol = _interpret_tol(tol, M.unit)
return (M.view(np.ndarray), tol) + args, kwargs, None, None
@function_helper(helps={np.linalg.inv, np.linalg.tensorinv})
def inv(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def pinv(a, rcond=1e-15, *args, **kwargs):
rcond = _interpret_tol(rcond, a.unit)
return (a.view(np.ndarray), rcond) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def det(a):
return (a.view(np.ndarray),), {}, a.unit ** a.shape[-1], None
@function_helper(helps={np.linalg.solve, np.linalg.tensorsolve})
def solve(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
return (
(a.view(np.ndarray), b.view(np.ndarray)) + args,
kwargs,
b.unit / a.unit,
None,
)
@function_helper(module=np.linalg)
def lstsq(a, b, rcond="warn"):
a, b = _as_quantities(a, b)
if rcond not in (None, "warn", -1):
rcond = _interpret_tol(rcond, a.unit)
return (
(a.view(np.ndarray), b.view(np.ndarray), rcond),
{},
(b.unit / a.unit, b.unit**2, None, a.unit),
None,
)
@function_helper(module=np.linalg)
def norm(x, ord=None, *args, **kwargs):
if ord == 0:
from astropy.units import dimensionless_unscaled
unit = dimensionless_unscaled
else:
unit = x.unit
return (x.view(np.ndarray), ord) + args, kwargs, unit, None
@function_helper(module=np.linalg)
def matrix_power(a, n):
return (a.value, n), {}, a.unit**n, None
@function_helper(module=np.linalg)
def cholesky(a):
return (a.value,), {}, a.unit**0.5, None
@function_helper(module=np.linalg)
def qr(a, mode="reduced"):
if mode.startswith("e"):
units = None
elif mode == "r":
units = a.unit
else:
from astropy.units import dimensionless_unscaled
units = (dimensionless_unscaled, a.unit)
return (a.value, mode), {}, units, None
@function_helper(helps={np.linalg.eig, np.linalg.eigh})
def eig(a, *args, **kwargs):
from astropy.units import dimensionless_unscaled
return (a.value,) + args, kwargs, (a.unit, dimensionless_unscaled), None
# ======================= np.lib.recfunctions =======================
@function_helper(module=np.lib.recfunctions)
def structured_to_unstructured(arr, *args, **kwargs):
"""
Convert a structured quantity to an unstructured one.
This only works if all the units are compatible.
"""
from astropy.units import StructuredUnit
target_unit = arr.unit.values()[0]
def replace_unit(x):
if isinstance(x, StructuredUnit):
return x._recursively_apply(replace_unit)
else:
return target_unit
to_unit = arr.unit._recursively_apply(replace_unit)
return (arr.to_value(to_unit),) + args, kwargs, target_unit, None
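# Illustrative sketch (hypothetical field units, not executed here): for a
# structured Quantity whose fields carry the units (km, m), the helper above
# first converts every field to the unit of the *first* field, so
# rfn.structured_to_unstructured returns an unstructured array wholly in km.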
def _build_structured_unit(dtype, unit):
"""Build structured unit from dtype
Parameters
----------
dtype : `numpy.dtype`
unit : `astropy.units.Unit`
Returns
-------
`astropy.units.Unit` or tuple
"""
if dtype.fields is None:
return unit
return tuple(_build_structured_unit(v[0], unit) for v in dtype.fields.values())
@function_helper(module=np.lib.recfunctions)
def unstructured_to_structured(arr, dtype, *args, **kwargs):
from astropy.units import StructuredUnit
target_unit = StructuredUnit(_build_structured_unit(dtype, arr.unit))
return (arr.to_value(arr.unit), dtype) + args, kwargs, target_unit, None
def _izip_units_flat(iterable):
"""Returns an iterator of collapsing any nested unit structure.
Parameters
----------
iterable : Iterable[StructuredUnit | Unit] or StructuredUnit
A structured unit or iterable thereof.
Yields
------
unit
"""
from astropy.units import StructuredUnit
# Make Structured unit (pass-through if it is already).
units = StructuredUnit(iterable)
# Yield from structured unit.
for v in units.values():
if isinstance(v, StructuredUnit):
yield from _izip_units_flat(v)
else:
yield v
@function_helper(helps=rfn.merge_arrays)
def merge_arrays(
seqarrays,
fill_value=-1,
flatten=False,
usemask=False,
asrecarray=False,
):
"""Merge structured Quantities field by field.
Like :func:`numpy.lib.recfunctions.merge_arrays`. Note that ``usemask`` and
``asrecarray`` are not supported at this time and will raise a ValueError if
not `False`.
"""
from astropy.units import Quantity, StructuredUnit
if asrecarray:
# TODO? implement if Quantity ever supports rec.array
raise ValueError("asrecarray=True is not supported.")
if usemask:
# TODO: use MaskedQuantity for this case
raise ValueError("usemask=True is not supported.")
# Do we have a single Quantity as input?
if isinstance(seqarrays, Quantity):
seqarrays = (seqarrays,)
# Note: this also converts ndarray -> Quantity[dimensionless]
seqarrays = _as_quantities(*seqarrays)
arrays = tuple(q.value for q in seqarrays)
units = tuple(q.unit for q in seqarrays)
if flatten:
unit = StructuredUnit(tuple(_izip_units_flat(units)))
elif len(arrays) == 1:
unit = StructuredUnit(units[0])
else:
unit = StructuredUnit(units)
return (
(arrays,),
dict(
fill_value=fill_value,
flatten=flatten,
usemask=usemask,
asrecarray=asrecarray,
),
unit,
None,
)
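# Illustrative sketch for the merge_arrays() helper above (not executed here):
#
#     x = np.arange(3.0) * u.m
#     t = np.arange(3.0) * u.s
#     rfn.merge_arrays((x, t))
#     # -> structured Quantity with fields ('f0', 'f1') and unit (m, s)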
|
95ed3362d65df5f6ae306f316fd428e0942d9d303d14ae1ce1b019457028eff7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
import itertools
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g / u.s**2, u.Jy)
class TestLogUnitCreation:
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
@pytest.mark.parametrize("lu_unit, lu_cls", zip(lu_units, lu_subclasses))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize("lu_unit", lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
assert lu == lu._default_function_unit # eg, MagUnit() == u.mag
assert lu._default_function_unit == lu # and u.mag == MagUnit()
@pytest.mark.parametrize(
"lu_unit, physical_unit", itertools.product(lu_units, pu_sample)
)
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do basic check that output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize(
"lu_cls, physical_unit",
itertools.product(lu_subclasses + [u.LogUnit], pu_sample),
)
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do basic check that output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit, function_unit=2 * lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2 * lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_lshift_magnitude(self):
mag = 1.0 << u.ABmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.ABmag
assert mag.value == 1.0
# same test for an array, which should produce a view
a2 = np.arange(10.0)
q2 = a2 << u.ABmag
assert isinstance(q2, u.Magnitude)
assert q2.unit == u.ABmag
assert np.all(q2.value == a2)
a2[9] = 0.0
assert np.all(q2.value == a2)
# a different magnitude unit
mag = 10.0 << u.STmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.STmag
assert mag.value == 10.0
def test_ilshift_magnitude(self):
# test in-place operation and conversion
mag_fnu_cgs = u.mag(u.erg / u.s / u.cm**2 / u.Hz)
m = np.arange(10.0) * u.mag(u.Jy)
jy = m.physical
m2 = m << mag_fnu_cgs
assert np.all(m2 == m.to(mag_fnu_cgs))
m2 = m
m <<= mag_fnu_cgs
assert m is m2 # Check it was done in-place!
assert np.all(m.value == m2.value)
assert m.unit == mag_fnu_cgs
# Check it works if equivalencies are in-place.
with u.add_enabled_equivalencies(u.spectral_density(5500 * u.AA)):
st = jy.to(u.ST)
m <<= u.STmag
assert m is m2
assert_quantity_allclose(m.physical, st)
assert m.unit == u.STmag
def test_lshift_errors(self):
m = np.arange(10.0) * u.mag(u.Jy)
with pytest.raises(u.UnitsError):
m << u.STmag
with pytest.raises(u.UnitsError):
m << u.Jy
with pytest.raises(u.UnitsError):
m <<= u.STmag
with pytest.raises(u.UnitsError):
m <<= u.Jy
def test_predefined_magnitudes():
assert_quantity_allclose(
(-21.1 * u.STmag).physical, 1.0 * u.erg / u.cm**2 / u.s / u.AA
)
assert_quantity_allclose(
(-48.6 * u.ABmag).physical, 1.0 * u.erg / u.cm**2 / u.s / u.Hz
)
assert_quantity_allclose((0 * u.M_bol).physical, c.L_bol0)
assert_quantity_allclose(
(0 * u.m_bol).physical, c.L_bol0 / (4.0 * np.pi * (10.0 * c.pc) ** 2)
)
def test_predefined_reinitialisation():
assert u.mag("STflux") == u.STmag
assert u.mag("ABflux") == u.ABmag
assert u.mag("Bol") == u.M_bol
assert u.mag("bol") == u.m_bol
# required for backwards-compatibility, at least unless deprecated
assert u.mag("ST") == u.STmag
assert u.mag("AB") == u.ABmag
def test_predefined_string_roundtrip():
"""Ensure round-tripping; see #5015"""
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regression for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
class TestLogUnitStrings:
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == "mag(Jy)"
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string("generic") == "mag(Jy)"
with pytest.raises(ValueError):
lu1.to_string("fits")
with pytest.raises(ValueError):
lu1.to_string(format="cds")
lu2 = u.dex()
assert str(lu2) == "dex"
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == "dex(1)"
lu3 = u.MagUnit(u.Jy, function_unit=2 * u.mag)
assert str(lu3) == "2 mag(Jy)"
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == "2 mag(Jy)"
lu4 = u.mag(u.ct)
assert lu4.to_string("generic") == "mag(ct)"
latex_str = r"$\mathrm{mag}$$\mathrm{\left( \mathrm{ct} \right)}$"
assert lu4.to_string("latex") == latex_str
assert lu4.to_string("latex_inline") == latex_str
assert lu4._repr_latex_() == latex_str
lu5 = u.mag(u.ct / u.s)
assert lu5.to_string("latex") == (
r"$\mathrm{mag}$$\mathrm{\left( " r"\mathrm{\frac{ct}{s}} \right)}$"
)
latex_str = r"$\mathrm{mag}$$\mathrm{\left( \mathrm{ct\,s^{-1}} " r"\right)}$"
assert lu5.to_string("latex_inline") == latex_str
class TestLogUnitConversion:
@pytest.mark.parametrize(
"lu_unit, physical_unit", itertools.product(lu_units, pu_sample)
)
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.0) == 1.0
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.0) == 0.0
pu = u.Unit(8.0 * physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(pu, 0.0) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0.0, atol=1.0e-15)
# Check we round-trip.
value = np.linspace(0.0, 10.0, 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.0e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize("lu_unit", lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0.0, 10.0, 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize(
"flu_unit, tlu_unit, physical_unit",
itertools.product(lu_units, lu_units, pu_sample),
)
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0.0, 10.0, 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(
flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)
)
tlu2 = tlu_unit(u.Unit(100.0 * physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.0e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.0e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
def test_magnitude_conversion_fails_message(self):
"""Check that "dimensionless" magnitude units include a message in their
exception text suggesting a possible cause of the problem.
"""
with pytest.raises(
u.UnitConversionError,
match="Did you perhaps subtract magnitudes so the unit got lost?",
):
(10 * u.ABmag - 2 * u.ABmag).to(u.nJy)
class TestLogUnitArithmetic:
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(
t.to(u.dimensionless_unscaled, np.arange(3.0) / 100.0),
lu2.to(lu2.physical_unit, np.arange(3.0)),
)
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
# For completeness, also ensure non-sensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
@pytest.mark.parametrize("power", (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogUnits to some power is only possible when the
physical unit is dimensionless, and that conversion is turned off when
the resulting logarithmic unit (such as mag**2) is incompatible."""
lu1 = u.mag(u.Jy)
if power == 0:
assert lu1**power == u.dimensionless_unscaled
elif power == 1:
assert lu1**power == lu1
else:
with pytest.raises(u.UnitsError):
lu1**power
# With dimensionless, though, it works, but returns a normal unit.
lu2 = u.mag(u.dimensionless_unscaled)
t = lu2**power
if power == 0:
assert t == u.dimensionless_unscaled
elif power == 1:
assert t == lu2
else:
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit**power
# also check we roundtrip
t2 = t ** (1.0 / power)
assert t2 == lu2.function_unit
with u.set_enabled_equivalencies(u.logarithmic()):
assert_allclose(
t2.to(u.dimensionless_unscaled, np.arange(3.0)),
lu2.to(lu2.physical_unit, np.arange(3.0)),
)
@pytest.mark.parametrize("other", pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 + other
with pytest.raises(u.UnitsError):
lu1 - other
with pytest.raises(u.UnitsError):
other - lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.0
with pytest.raises(TypeError):
lu1 - [1.0, 2.0, 3.0]
@pytest.mark.parametrize(
"other",
(
u.mag,
u.mag(),
u.mag(u.Jy),
u.mag(u.m),
u.Unit(2 * u.mag),
u.MagUnit("", 2.0 * u.mag),
),
)
def test_addition_subtraction(self, other):
"""Check physical units are changed appropriately"""
lu1 = u.mag(u.Jy)
other_pu = getattr(other, "physical_unit", u.dimensionless_unscaled)
lu_sf = lu1 + other
assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
lu_sr = other + lu1
assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
lu_df = lu1 - other
assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
lu_dr = other - lu1
assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit("DM", 1.0 / (4.0 * np.pi * (10.0 * u.pc) ** 2))
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg / u.s / u.AA)
def test_neg_pos(self):
lu1 = u.mag(u.Jy)
neg_lu = -lu1
assert neg_lu != lu1
assert neg_lu.physical_unit == u.Jy**-1
assert -neg_lu == lu1
pos_lu = +lu1
assert pos_lu is not lu1
assert pos_lu == lu1
def test_pickle():
lu1 = u.dex(u.cm / u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
class TestLogQuantityCreation:
@pytest.mark.parametrize(
"lq, lu", zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])
)
def test_logarithmic_quantities(self, lq, lu):
"""Check logarithmic quantities are all set up correctly"""
assert lq._unit_class == lu
assert type(lu()._quantity_class(1.0)) is lq
@pytest.mark.parametrize(
"lq_cls, physical_unit", itertools.product(lq_subclasses, pu_sample)
)
def test_subclass_creation(self, lq_cls, physical_unit):
"""Create LogQuantity subclass objects for some physical units,
and basic check on transformations"""
value = np.arange(1.0, 10.0)
log_q = lq_cls(value * physical_unit)
assert log_q.unit.physical_unit == physical_unit
assert log_q.unit.function_unit == log_q.unit._default_function_unit
assert_allclose(log_q.physical.value, value)
with pytest.raises(ValueError):
lq_cls(value, physical_unit)
@pytest.mark.parametrize(
"unit",
(
u.mag,
u.mag(),
u.mag(u.Jy),
u.mag(u.m),
u.Unit(2 * u.mag),
u.MagUnit("", 2.0 * u.mag),
u.MagUnit(u.Jy, -1 * u.mag),
u.MagUnit(u.m, -2.0 * u.mag),
),
)
def test_different_units(self, unit):
q = u.Magnitude(1.23, unit)
assert q.unit.function_unit == getattr(unit, "function_unit", unit)
assert q.unit.physical_unit is getattr(
unit, "physical_unit", u.dimensionless_unscaled
)
@pytest.mark.parametrize(
"value, unit",
(
(1.0 * u.mag(u.Jy), None),
(1.0 * u.dex(u.Jy), None),
(1.0 * u.mag(u.W / u.m**2 / u.Hz), u.mag(u.Jy)),
(1.0 * u.dex(u.W / u.m**2 / u.Hz), u.mag(u.Jy)),
),
)
def test_function_values(self, value, unit):
lq = u.Magnitude(value, unit)
assert lq == value
assert lq.unit.function_unit == u.mag
assert lq.unit.physical_unit == getattr(
unit, "physical_unit", value.unit.physical_unit
)
@pytest.mark.parametrize(
"unit",
(
u.mag(),
u.mag(u.Jy),
u.mag(u.m),
u.MagUnit("", 2.0 * u.mag),
u.MagUnit(u.Jy, -1 * u.mag),
u.MagUnit(u.m, -2.0 * u.mag),
),
)
def test_indirect_creation(self, unit):
q1 = 2.5 * unit
assert isinstance(q1, u.Magnitude)
assert q1.value == 2.5
assert q1.unit == unit
pv = 100.0 * unit.physical_unit
q2 = unit * pv
assert q2.unit == unit
assert q2.unit.physical_unit == pv.unit
assert q2.to_value(unit.physical_unit) == 100.0
assert (q2._function_view / u.mag).to_value(1) == -5.0
q3 = unit / 0.4
assert q3 == q1
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100.0, 1000.0] * u.cm / u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
# But fine if we have the right magnitude.
q = [2.0, 3.0] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1.0, lu)
q = u.Quantity(1.0, lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10.0, 12.0, 14.0] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.0 * u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews:
def setup_method(self):
self.lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
self.lq2 = u.Magnitude(np.arange(1.0, 5.0))
def test_value_view(self):
lq_value = self.lq.value
assert type(lq_value) is np.ndarray
lq_value[2] = -1.0
assert np.all(self.lq.value == lq_value)
def test_function_view(self):
lq_fv = self.lq._function_view
assert type(lq_fv) is u.Quantity
assert lq_fv.unit is self.lq.unit.function_unit
lq_fv[3] = -2.0 * lq_fv.unit
assert np.all(self.lq.value == lq_fv.value)
def test_quantity_view(self):
# Cannot view as Quantity, since the unit cannot be represented.
with pytest.raises(TypeError):
self.lq.view(u.Quantity)
# But a dimensionless one is fine.
q2 = self.lq2.view(u.Quantity)
assert q2.unit is u.mag
assert np.all(q2.value == self.lq2.value)
lq3 = q2.view(u.Magnitude)
assert type(lq3.unit) is u.MagUnit
assert lq3.unit.physical_unit == u.dimensionless_unscaled
assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing:
def test_item_get_and_set(self):
lq1 = u.Magnitude(np.arange(1.0, 11.0) * u.Jy)
assert lq1[9] == u.Magnitude(10.0 * u.Jy)
lq1[2] = 100.0 * u.Jy
assert lq1[2] == u.Magnitude(100.0 * u.Jy)
with pytest.raises(u.UnitsError):
lq1[2] = 100.0 * u.m
with pytest.raises(u.UnitsError):
lq1[2] = 100.0 * u.mag
with pytest.raises(u.UnitsError):
lq1[2] = u.Magnitude(100.0 * u.m)
assert lq1[2] == u.Magnitude(100.0 * u.Jy)
def test_slice_get_and_set(self):
lq1 = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
lq1[2:4] = 100.0 * u.Jy
assert np.all(lq1[2:4] == u.Magnitude(100.0 * u.Jy))
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.0 * u.m
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.0 * u.mag
with pytest.raises(u.UnitsError):
lq1[2:4] = u.Magnitude(100.0 * u.m)
assert np.all(lq1[2] == u.Magnitude(100.0 * u.Jy))
class TestLogQuantityArithmetic:
@pytest.mark.parametrize(
"other",
[
2.4 * u.mag(),
12.34 * u.ABmag,
u.Magnitude(3.45 * u.Jy),
u.Dex(3.0),
u.Dex(np.linspace(3000, 5000, 10) * u.Angstrom),
u.Magnitude(6.78, 2.0 * u.mag),
],
)
@pytest.mark.parametrize("fac", [1.0, 2, 0.4])
def test_multiplication_division(self, other, fac):
"""Check that multiplication and division work as expected"""
lq_sf = fac * other
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical**fac)
lq_sf = other * fac
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical**fac)
lq_sf = other / fac
assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
assert_allclose(lq_sf.physical**fac, other.physical)
lq_sf = other.copy()
lq_sf *= fac
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical**fac)
lq_sf = other.copy()
lq_sf /= fac
assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
assert_allclose(lq_sf.physical**fac, other.physical)
def test_more_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this keeps
the result as a LogQuantity if possible."""
lq = u.Magnitude(np.arange(1.0, 11.0) * u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.0 * u.m)
with pytest.raises(u.UnitsError):
(1.0 * u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.0))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
lq_sf = lq.copy()
with pytest.raises(u.UnitsError):
lq_sf *= lq2
# ensure that nothing changed inside
assert (lq_sf == lq).all()
with pytest.raises(u.UnitsError):
lq_sf /= lq2
# ensure that nothing changed inside
assert (lq_sf == lq).all()
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.0)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value / 2.0)
# And multiplying with a dimensionless array is also OK.
r2 = lq2 * np.arange(10.0)
assert isinstance(r2, u.Magnitude)
assert np.all(r2 == lq2._function_view * np.arange(10.0))
# with dimensionless, normal units OK, but return normal quantities
# if the unit no longer is consistent with the logarithmic unit.
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.0 * u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view * 2)
@pytest.mark.parametrize("power", (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogQuantities to some power is only possible when
the physical unit is dimensionless, and that conversion is turned off
when the resulting logarithmic unit (say, mag**2) is incompatible."""
lq = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
if power == 0:
assert np.all(lq**power == 1.0)
elif power == 1:
assert np.all(lq**power == lq)
else:
with pytest.raises(u.UnitsError):
lq**power
# with dimensionless, it works, but falls back to normal quantity
# (except for power=1)
lq2 = u.Magnitude(np.arange(10.0))
t = lq2**power
if power == 0:
assert t.unit is u.dimensionless_unscaled
assert np.all(t.value == 1.0)
elif power == 1:
assert np.all(t == lq2)
else:
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit**power
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(u.dimensionless_unscaled)
def test_error_on_lq_as_power(self):
lq = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
with pytest.raises(TypeError):
lq**lq
@pytest.mark.parametrize("other", pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
q = 1.23 * other
with pytest.raises(u.UnitsError):
lq + q
with pytest.raises(u.UnitsError):
lq - q
with pytest.raises(u.UnitsError):
q - lq
@pytest.mark.parametrize(
"other",
(
1.23 * u.mag,
2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy),
u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2 * u.mag),
u.Magnitude(6.78, 2.0 * u.mag),
),
)
def test_addition_subtraction(self, other):
"""Check that addition/subtraction with quantities with magnitude or
MagUnit units works, and that it changes the physical units
appropriately."""
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
other_physical = other.to(
getattr(other.unit, "physical_unit", u.dimensionless_unscaled),
equivalencies=u.logarithmic(),
)
lq_sf = lq + other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_sr = other + lq
assert_allclose(lq_sr.physical, lq.physical * other_physical)
lq_df = lq - other
assert_allclose(lq_df.physical, lq.physical / other_physical)
lq_dr = other - lq
assert_allclose(lq_dr.physical, other_physical / lq.physical)
@pytest.mark.parametrize("other", pu_sample)
def test_inplace_addition_subtraction_unit_checks(self, other):
lu1 = u.mag(u.Jy)
lq1 = u.Magnitude(np.arange(1.0, 10.0), lu1)
with pytest.raises(u.UnitsError):
lq1 += other
assert np.all(lq1.value == np.arange(1.0, 10.0))
assert lq1.unit == lu1
with pytest.raises(u.UnitsError):
lq1 -= other
assert np.all(lq1.value == np.arange(1.0, 10.0))
assert lq1.unit == lu1
@pytest.mark.parametrize(
"other",
(
1.23 * u.mag,
2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy),
u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2 * u.mag),
u.Magnitude(6.78, 2.0 * u.mag),
),
)
def test_inplace_addition_subtraction(self, other):
"""Check that inplace addition/subtraction with quantities with
magnitude or MagUnit units works, and that it changes the physical
units appropriately."""
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
other_physical = other.to(
getattr(other.unit, "physical_unit", u.dimensionless_unscaled),
equivalencies=u.logarithmic(),
)
lq_sf = lq.copy()
lq_sf += other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_df = lq.copy()
lq_df -= other
assert_allclose(lq_df.physical, lq.physical / other_physical)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit("DM", 1.0 / (4.0 * np.pi * (10.0 * u.pc) ** 2))
DMmag = u.mag(dm0)
m_st = 10.0 * u.STmag
dm = 5.0 * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg / u.s / u.AA)
ratio = M_st.physical / (m_st.physical * 4.0 * np.pi * (100.0 * u.pc) ** 2)
assert np.abs(ratio - 1.0) < 1.0e-15
class TestLogQuantityComparisons:
def test_comparison_to_non_quantities_fails(self):
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
with pytest.raises(TypeError):
lq > "a"
assert not (lq == "a")
assert lq != "a"
def test_comparison(self):
lq1 = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
lq2 = u.Magnitude(2.0 * u.Jy)
assert np.all((lq1 > lq2) == np.array([True, False, False]))
assert np.all((lq1 == lq2) == np.array([False, True, False]))
lq3 = u.Dex(2.0 * u.Jy)
assert np.all((lq1 > lq3) == np.array([True, False, False]))
assert np.all((lq1 == lq3) == np.array([False, True, False]))
lq4 = u.Magnitude(2.0 * u.m)
assert not (lq1 == lq4)
assert lq1 != lq4
with pytest.raises(u.UnitsError):
lq1 < lq4
q5 = 1.5 * u.Jy
assert np.all((lq1 > q5) == np.array([True, False, False]))
assert np.all((q5 < lq1) == np.array([True, False, False]))
with pytest.raises(u.UnitsError):
lq1 >= 2.0 * u.m
with pytest.raises(u.UnitsError):
lq1 <= lq1.value * u.mag
# For physically dimensionless, we can compare with the function unit.
lq6 = u.Magnitude(np.arange(1.0, 4.0))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)
# but not some arbitrary unit, of course.
with pytest.raises(u.UnitsError):
lq6 < 2.0 * u.m
class TestLogQuantityMethods:
def setup_method(self):
self.mJy = np.arange(1.0, 5.0).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1.0, 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
@pytest.mark.parametrize(
"method",
(
"mean",
"min",
"max",
"round",
"trace",
"std",
"var",
"ptp",
"diff",
"ediff1d",
),
)
def test_always_ok(self, method):
for mag in self.mags:
res = getattr(mag, method)()
assert np.all(res.value == getattr(mag._function_view, method)().value)
if method in ("std", "ptp", "diff", "ediff1d"):
assert res.unit == u.mag()
elif method == "var":
assert res.unit == u.mag**2
else:
assert res.unit == mag.unit
def test_clip(self):
for mag in self.mags:
assert np.all(
mag.clip(2.0 * mag.unit, 4.0 * mag.unit).value
== mag.value.clip(2.0, 4.0)
)
@pytest.mark.parametrize("method", ("sum", "cumsum", "nansum"))
def test_only_ok_if_dimensionless(self, method):
res = getattr(self.m1, method)()
assert np.all(res.value == getattr(self.m1._function_view, method)().value)
assert res.unit == self.m1.unit
with pytest.raises(TypeError):
getattr(self.mJy, method)()
def test_dot(self):
assert np.all(self.m1.dot(self.m1).value == self.m1.value.dot(self.m1.value))
@pytest.mark.parametrize("method", ("prod", "cumprod"))
def test_never_ok(self, method):
with pytest.raises(TypeError):
getattr(self.mJy, method)()
with pytest.raises(TypeError):
getattr(self.m1, method)()
|
4bb87b824b5fd48d20a0a03ba247e688ca308ad30e22db6873b711d2ee9fae99 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the Quantity class and related."""
import copy
import decimal
import numbers
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import (
    assert_allclose,
    assert_array_almost_equal,
    assert_array_equal,
)
from astropy import units as u
from astropy.units.quantity import _UNIT_NOT_INITIALISED
from astropy.utils import isiterable, minversion
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
""" The Quantity class will represent a number + unit + uncertainty """
class TestQuantityCreation:
def test_1(self):
# create objects through operations with Unit objects:
quantity = 11.42 * u.meter # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = u.meter * 11.42 # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = 11.42 / u.meter
assert isinstance(quantity, u.Quantity)
quantity = u.meter / 11.42
assert isinstance(quantity, u.Quantity)
quantity = 11.42 * u.meter / u.second
assert isinstance(quantity, u.Quantity)
with pytest.raises(TypeError):
quantity = 182.234 + u.meter
with pytest.raises(TypeError):
quantity = 182.234 - u.meter
with pytest.raises(TypeError):
quantity = 182.234 % u.meter
def test_2(self):
# create objects using the Quantity constructor:
_ = u.Quantity(11.412, unit=u.meter)
_ = u.Quantity(21.52, "cm")
q3 = u.Quantity(11.412)
# By default quantities that don't specify a unit are unscaled
# dimensionless
assert q3.unit == u.Unit(1)
with pytest.raises(TypeError):
u.Quantity(object(), unit=u.m)
def test_3(self):
# with pytest.raises(u.UnitsError):
with pytest.raises(ValueError): # Until @mdboom fixes the errors in units
u.Quantity(11.412, unit="testingggg")
def test_nan_inf(self):
# Not-a-number
q = u.Quantity("nan", unit="cm")
assert np.isnan(q.value)
q = u.Quantity("NaN", unit="cm")
assert np.isnan(q.value)
q = u.Quantity("-nan", unit="cm") # float() allows this
assert np.isnan(q.value)
q = u.Quantity("nan cm")
assert np.isnan(q.value)
assert q.unit == u.cm
# Infinity
q = u.Quantity("inf", unit="cm")
assert np.isinf(q.value)
q = u.Quantity("-inf", unit="cm")
assert np.isinf(q.value)
q = u.Quantity("inf cm")
assert np.isinf(q.value)
assert q.unit == u.cm
q = u.Quantity("Infinity", unit="cm") # float() allows this
assert np.isinf(q.value)
# make sure these strings don't parse...
with pytest.raises(TypeError):
q = u.Quantity("", unit="cm")
with pytest.raises(TypeError):
q = u.Quantity("spam", unit="cm")
def test_unit_property(self):
# test getting and setting 'unit' attribute
q1 = u.Quantity(11.4, unit=u.meter)
with pytest.raises(AttributeError):
q1.unit = u.cm
def test_preserve_dtype(self):
"""Test that if an explicit dtype is given, it is used, while if not,
numbers are converted to float (including decimal.Decimal, which
numpy converts to an object; closes #1419)
"""
# If dtype is specified, use it, but if not, convert int, bool to float
q1 = u.Quantity(12, unit=u.m / u.s, dtype=int)
assert q1.dtype == int
q2 = u.Quantity(q1)
assert q2.dtype == float
assert q2.value == float(q1.value)
assert q2.unit == q1.unit
# but we should preserve any float32 or even float16
a3_32 = np.array([1.0, 2.0], dtype=np.float32)
q3_32 = u.Quantity(a3_32, u.yr)
assert q3_32.dtype == a3_32.dtype
a3_16 = np.array([1.0, 2.0], dtype=np.float16)
q3_16 = u.Quantity(a3_16, u.yr)
assert q3_16.dtype == a3_16.dtype
# items stored as objects by numpy should be converted to float
# by default
q4 = u.Quantity(decimal.Decimal("10.25"), u.m)
assert q4.dtype == float
q5 = u.Quantity(decimal.Decimal("10.25"), u.m, dtype=object)
assert q5.dtype == object
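# Added note (illustrative, not part of the original tests): the float default
# only applies when no dtype argument is given at all.  Roughly:
#     u.Quantity(3, u.m).dtype              # float64 (int input promoted)
#     u.Quantity(3, u.m, dtype=None).dtype  # platform integer, via NumPy inference
#     u.Quantity(3, u.m, dtype=np.float32)  # kept as float32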
def test_numpy_style_dtype_inspect(self):
"""Test that if ``dtype=None``, NumPy's dtype inspection is used."""
q2 = u.Quantity(12, dtype=None)
assert np.issubdtype(q2.dtype, np.integer)
def test_float_dtype_promotion(self):
"""Test that if ``dtype=numpy.inexact``, the minimum precision is float64."""
q1 = u.Quantity(12, dtype=np.inexact)
assert not np.issubdtype(q1.dtype, np.integer)
assert q1.dtype == np.float64
q2 = u.Quantity(np.float64(12), dtype=np.inexact)
assert q2.dtype == np.float64
q3 = u.Quantity(np.float32(12), dtype=np.inexact)
assert q3.dtype == np.float32
if hasattr(np, "float16"):
q3 = u.Quantity(np.float16(12), dtype=np.inexact)
assert q3.dtype == np.float16
if hasattr(np, "float128"):
q4 = u.Quantity(np.float128(12), dtype=np.inexact)
assert q4.dtype == np.float128
def test_copy(self):
# By default, a new quantity is constructed, but not if copy=False
a = np.arange(10.0)
q0 = u.Quantity(a, unit=u.m / u.s)
assert q0.base is not a
q1 = u.Quantity(a, unit=u.m / u.s, copy=False)
assert q1.base is a
q2 = u.Quantity(q0)
assert q2 is not q0
assert q2.base is not q0.base
q2 = u.Quantity(q0, copy=False)
assert q2 is q0
assert q2.base is q0.base
q3 = u.Quantity(q0, q0.unit, copy=False)
assert q3 is q0
assert q3.base is q0.base
q4 = u.Quantity(q0, u.cm / u.s, copy=False)
assert q4 is not q0
assert q4.base is not q0.base
def test_subok(self):
"""Test subok can be used to keep class, or to insist on Quantity"""
class MyQuantitySubclass(u.Quantity):
pass
myq = MyQuantitySubclass(np.arange(10.0), u.m)
# try both with and without changing the unit
assert type(u.Quantity(myq)) is u.Quantity
assert type(u.Quantity(myq, subok=True)) is MyQuantitySubclass
assert type(u.Quantity(myq, u.km)) is u.Quantity
assert type(u.Quantity(myq, u.km, subok=True)) is MyQuantitySubclass
def test_order(self):
"""Test that order is correctly propagated to np.array"""
ac = np.array(np.arange(10.0), order="C")
qcc = u.Quantity(ac, u.m, order="C")
assert qcc.flags["C_CONTIGUOUS"]
qcf = u.Quantity(ac, u.m, order="F")
assert qcf.flags["F_CONTIGUOUS"]
qca = u.Quantity(ac, u.m, order="A")
assert qca.flags["C_CONTIGUOUS"]
# check it works also when passing in a quantity
assert u.Quantity(qcc, order="C").flags["C_CONTIGUOUS"]
assert u.Quantity(qcc, order="A").flags["C_CONTIGUOUS"]
assert u.Quantity(qcc, order="F").flags["F_CONTIGUOUS"]
af = np.array(np.arange(10.0), order="F")
qfc = u.Quantity(af, u.m, order="C")
assert qfc.flags["C_CONTIGUOUS"]
qff = u.Quantity(ac, u.m, order="F")
assert qff.flags["F_CONTIGUOUS"]
qfa = u.Quantity(af, u.m, order="A")
assert qfa.flags["F_CONTIGUOUS"]
assert u.Quantity(qff, order="C").flags["C_CONTIGUOUS"]
assert u.Quantity(qff, order="A").flags["F_CONTIGUOUS"]
assert u.Quantity(qff, order="F").flags["F_CONTIGUOUS"]
def test_ndmin(self):
"""Test that ndmin is correctly propagated to np.array"""
a = np.arange(10.0)
q1 = u.Quantity(a, u.m, ndmin=1)
assert q1.ndim == 1 and q1.shape == (10,)
q2 = u.Quantity(a, u.m, ndmin=2)
assert q2.ndim == 2 and q2.shape == (1, 10)
# check it works also when passing in a quantity
q3 = u.Quantity(q1, u.m, ndmin=3)
assert q3.ndim == 3 and q3.shape == (1, 1, 10)
# see github issue #10063
assert u.Quantity(u.Quantity(1, "m"), "m", ndmin=1).ndim == 1
assert u.Quantity(u.Quantity(1, "cm"), "m", ndmin=1).ndim == 1
def test_non_quantity_with_unit(self):
"""Test that unit attributes in objects get recognized."""
class MyQuantityLookalike(np.ndarray):
pass
a = np.arange(3.0)
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = "m"
q1 = u.Quantity(mylookalike)
assert isinstance(q1, u.Quantity)
assert q1.unit is u.m
assert np.all(q1.value == a)
q2 = u.Quantity(mylookalike, u.mm)
assert q2.unit is u.mm
assert np.all(q2.value == 1000.0 * a)
q3 = u.Quantity(mylookalike, copy=False)
assert np.all(q3.value == mylookalike)
q3[2] = 0
assert q3[2] == 0.0
assert mylookalike[2] == 0.0
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = u.m
q4 = u.Quantity(mylookalike, u.mm, copy=False)
q4[2] = 0
assert q4[2] == 0.0
assert mylookalike[2] == 2.0
mylookalike.unit = "nonsense"
with pytest.raises(TypeError):
u.Quantity(mylookalike)
def test_creation_via_view(self):
# This works but is no better than 1. * u.m
q1 = 1.0 << u.m
assert isinstance(q1, u.Quantity)
assert q1.unit == u.m
assert q1.value == 1.0
# With an array, we get an actual view.
a2 = np.arange(10.0)
q2 = a2 << u.m / u.s
assert isinstance(q2, u.Quantity)
assert q2.unit == u.m / u.s
assert np.all(q2.value == a2)
a2[9] = 0.0
assert np.all(q2.value == a2)
# But with a unit change we get a copy.
q3 = q2 << u.mm / u.s
assert isinstance(q3, u.Quantity)
assert q3.unit == u.mm / u.s
assert np.all(q3.value == a2 * 1000.0)
a2[8] = 0.0
assert q3[8].value == 8000.0
# Without a unit change, we do get a view.
q4 = q2 << q2.unit
a2[7] = 0.0
assert np.all(q4.value == a2)
with pytest.raises(u.UnitsError):
q2 << u.s
# But one can do an in-place unit change.
a2_copy = a2.copy()
q2 <<= u.mm / u.s
assert q2.unit == u.mm / u.s
# Of course, this changes a2 as well.
assert np.all(q2.value == a2)
# Sanity check on the values.
assert np.all(q2.value == a2_copy * 1000.0)
a2[8] = -1.0
# Using quantities, one can also work with strings.
q5 = q2 << "km/hr"
assert q5.unit == u.km / u.hr
assert np.all(q5 == q2)
# Finally, we can use scalar quantities as units.
not_quite_a_foot = 30.0 * u.cm
a6 = np.arange(5.0)
q6 = a6 << not_quite_a_foot
assert q6.unit == u.Unit(not_quite_a_foot)
assert np.all(q6.to_value(u.cm) == 30.0 * a6)
def test_rshift_warns(self):
with pytest.raises(TypeError), pytest.warns(
AstropyWarning, match="is not implemented"
) as warning_lines:
1 >> u.m
assert len(warning_lines) == 1
q = 1.0 * u.km
with pytest.raises(TypeError), pytest.warns(
AstropyWarning, match="is not implemented"
) as warning_lines:
q >> u.m
assert len(warning_lines) == 1
with pytest.raises(TypeError), pytest.warns(
AstropyWarning, match="is not implemented"
) as warning_lines:
q >>= u.m
assert len(warning_lines) == 1
with pytest.raises(TypeError), pytest.warns(
AstropyWarning, match="is not implemented"
) as warning_lines:
1.0 >> q
assert len(warning_lines) == 1
class TestQuantityOperations:
q1 = u.Quantity(11.42, u.meter)
q2 = u.Quantity(8.0, u.centimeter)
def test_addition(self):
# Take units from left object, q1
new_quantity = self.q1 + self.q2
assert new_quantity.value == 11.5
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 + self.q1
assert new_quantity.value == 1150.0
assert new_quantity.unit == u.centimeter
new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km)
assert new_q.unit == u.m
assert new_q.value == 15000.1
def test_subtraction(self):
# Take units from left object, q1
new_quantity = self.q1 - self.q2
assert new_quantity.value == 11.34
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 - self.q1
assert new_quantity.value == -1134.0
assert new_quantity.unit == u.centimeter
def test_multiplication(self):
# Take units from left object, q1
new_quantity = self.q1 * self.q2
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.meter * u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 * self.q1
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.centimeter * u.meter)
# Multiply with a number
new_quantity = 15.0 * self.q1
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
# Multiply with a number
new_quantity = self.q1 * 15.0
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
def test_division(self):
# Take units from left object, q1
new_quantity = self.q1 / self.q2
assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5)
assert new_quantity.unit == (u.meter / u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 / self.q1
assert_array_almost_equal(new_quantity.value, 0.70052539404553416, decimal=16)
assert new_quantity.unit == (u.centimeter / u.meter)
q1 = u.Quantity(11.4, unit=u.meter)
q2 = u.Quantity(10.0, unit=u.second)
new_quantity = q1 / q2
assert_array_almost_equal(new_quantity.value, 1.14, decimal=10)
assert new_quantity.unit == (u.meter / u.second)
# divide with a number
new_quantity = self.q1 / 10.0
assert new_quantity.value == 1.142
assert new_quantity.unit == u.meter
# divide with a number
new_quantity = 11.42 / self.q1
assert new_quantity.value == 1.0
assert new_quantity.unit == u.Unit("1/m")
def test_commutativity(self):
"""Regression test for issue #587."""
new_q = u.Quantity(11.42, "m*s")
assert self.q1 * u.s == u.s * self.q1 == new_q
assert self.q1 / u.s == u.Quantity(11.42, "m/s")
assert u.s / self.q1 == u.Quantity(1 / 11.42, "s/m")
def test_power(self):
# raise quantity to a power
new_quantity = self.q1**2
assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5)
assert new_quantity.unit == u.Unit("m^2")
new_quantity = self.q1**3
assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7)
assert new_quantity.unit == u.Unit("m^3")
def test_matrix_multiplication(self):
a = np.eye(3)
q = a * u.m
result1 = q @ a
assert np.all(result1 == q)
result2 = a @ q
assert np.all(result2 == q)
result3 = q @ q
assert np.all(result3 == a * u.m**2)
q2 = np.array(
[[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]
) / u.s # fmt: skip
result4 = q @ q2
assert np.all(result4 == np.matmul(a, q2.value) * q.unit * q2.unit)
def test_unary(self):
# Test the minus unary operator
new_quantity = -self.q1
assert new_quantity.value == -self.q1.value
assert new_quantity.unit == self.q1.unit
new_quantity = -(-self.q1)
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
# Test the plus unary operator
new_quantity = +self.q1
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
def test_abs(self):
q = 1.0 * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == q.value
assert new_quantity.unit == q.unit
q = -1.0 * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == -q.value
assert new_quantity.unit == q.unit
def test_incompatible_units(self):
"""When trying to add or subtract units that aren't compatible, throw an error"""
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, unit=u.second)
with pytest.raises(u.UnitsError):
q1 + q2
def test_non_number_type(self):
q1 = u.Quantity(11.412, unit=u.meter)
with pytest.raises(
TypeError, match=r"Unsupported operand type\(s\) for ufunc .*"
):
q1 + {"a": 1}
with pytest.raises(TypeError):
q1 + u.meter
def test_dimensionless_operations(self):
# test conversion to dimensionless
dq = 3.0 * u.m / u.km
dq1 = dq + 1.0 * u.mm / u.km
assert dq1.value == 3.001
assert dq1.unit == dq.unit
dq2 = dq + 1.0
assert dq2.value == 1.003
assert dq2.unit == u.dimensionless_unscaled
# check that adding or subtracting a dimensionless Quantity to/from a
# dimensionful one raises
with pytest.raises(u.UnitsError):
self.q1 + u.Quantity(0.1, unit=u.Unit(""))
with pytest.raises(u.UnitsError):
self.q1 - u.Quantity(0.1, unit=u.Unit(""))
# and test that scaling of integers works
q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
q2 = q + np.array([4, 5, 6])
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))
# but not if doing it inplace
with pytest.raises(TypeError):
q += np.array([1, 2, 3])
# except if it is actually possible
q = np.array([1, 2, 3]) * u.km / u.m
q += np.array([4, 5, 6])
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == np.array([1004, 2005, 3006]))
def test_complicated_operation(self):
"""Perform a more complicated test"""
from astropy.units import imperial
# Multiple units
distance = u.Quantity(15.0, u.meter)
time = u.Quantity(11.0, u.second)
velocity = (distance / time).to(imperial.mile / u.hour)
assert_array_almost_equal(velocity.value, 3.05037, decimal=5)
G = u.Quantity(6.673e-11, u.m**3 / u.kg / u.s**2)
_ = (1.0 / (4.0 * np.pi * G)).to(u.pc**-3 / u.s**-2 * u.kg)
# Area
side1 = u.Quantity(11.0, u.centimeter)
side2 = u.Quantity(7.0, u.centimeter)
area = side1 * side2
assert_array_almost_equal(area.value, 77.0, decimal=15)
assert area.unit == u.cm * u.cm
def test_comparison(self):
# equality/ non-equality is straightforward for quantity objects
assert (1 / (u.cm * u.cm)) == 1 * u.cm**-2
assert 1 * u.m == 100 * u.cm
assert 1 * u.m != 1 * u.cm
# when one is a unit, Quantity does not know what to do,
# but unit is fine with it, so it still works
unit = u.cm**3
q = 1.0 * unit
assert q.__eq__(unit) is NotImplemented
assert unit.__eq__(q) is True
assert q == unit
q = 1000.0 * u.mm**3
assert q == unit
# mismatched types should never work
assert not 1.0 * u.cm == 1.0
assert 1.0 * u.cm != 1.0
# comparison with zero should raise a deprecation warning
for quantity in (1.0 * u.cm, 1.0 * u.dimensionless_unscaled):
with pytest.warns(
AstropyDeprecationWarning,
match=(
"The truth value of a Quantity is ambiguous. "
"In the future this will raise a ValueError."
),
):
bool(quantity)
def test_numeric_converters(self):
# float, int, long, and __index__ should only work for single
# quantities, of appropriate type, and only if they are dimensionless.
# for index, this should be unscaled as well
# (Check on __index__ is also a regression test for #1557)
# quantities with units should never convert, or be usable as an index
q1 = u.Quantity(1, u.m)
converter_err_msg = (
"only dimensionless scalar quantities can be converted to Python scalars"
)
index_err_msg = (
"only integer dimensionless scalar quantities "
"can be converted to a Python index"
)
with pytest.raises(TypeError) as exc:
float(q1)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q1)
assert exc.value.args[0] == converter_err_msg
# We used to test `q1 * ['a', 'b', 'c']` here, but that it worked
# at all was a really odd confluence of bugs. Since it doesn't work
# in numpy >=1.10 any more, just go directly for `__index__` (which
# makes the test more similar to the `int`, `long`, etc., tests).
with pytest.raises(TypeError) as exc:
q1.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless but scaled is OK, however
q2 = u.Quantity(1.23, u.m / u.km)
assert float(q2) == float(q2.to_value(u.dimensionless_unscaled))
assert int(q2) == int(q2.to_value(u.dimensionless_unscaled))
with pytest.raises(TypeError) as exc:
q2.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless unscaled is OK, though for index needs to be int
q3 = u.Quantity(1.23, u.dimensionless_unscaled)
assert float(q3) == 1.23
assert int(q3) == 1
with pytest.raises(TypeError) as exc:
q3.__index__()
assert exc.value.args[0] == index_err_msg
# integer dimensionless unscaled is good for all
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert float(q4) == 2.0
assert int(q4) == 2
assert q4.__index__() == 2
# but arrays are not OK
q5 = u.Quantity([1, 2], u.m)
with pytest.raises(TypeError) as exc:
float(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
q5.__index__()
assert exc.value.args[0] == index_err_msg
# See https://github.com/numpy/numpy/issues/5074
# It seems unlikely this will be resolved, so xfail'ing it.
@pytest.mark.xfail(reason="list multiplication only works for numpy <=1.10")
def test_numeric_converter_to_index_in_practice(self):
"""Test that use of __index__ actually works."""
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert q4 * ["a", "b", "c"] == ["a", "b", "c", "a", "b", "c"]
def test_array_converters(self):
# Scalar quantity
q = u.Quantity(1.23, u.m)
assert np.all(np.array(q) == np.array([1.23]))
# Array quantity
q = u.Quantity([1.0, 2.0, 3.0], u.m)
assert np.all(np.array(q) == np.array([1.0, 2.0, 3.0]))
def test_quantity_conversion():
q1 = u.Quantity(0.1, unit=u.meter)
value = q1.value
assert value == 0.1
value_in_km = q1.to_value(u.kilometer)
assert value_in_km == 0.0001
new_quantity = q1.to(u.kilometer)
assert new_quantity.value == 0.0001
with pytest.raises(u.UnitsError):
q1.to(u.zettastokes)
with pytest.raises(u.UnitsError):
q1.to_value(u.zettastokes)
def test_quantity_ilshift(): # in-place conversion
q = u.Quantity(10, unit=u.one)
# Incompatible units. This goes through ilshift and hits a
# UnitConversionError first in ilshift, then in the unit's rlshift.
with pytest.raises(u.UnitConversionError):
q <<= u.rad
# unless the equivalency is enabled
with u.add_enabled_equivalencies(u.dimensionless_angles()):
q <<= u.rad
assert np.isclose(q, 10 * u.rad)
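# Added sketch (illustrative, not part of the original suite): ``<<=`` converts
# a quantity in place, so existing references observe the new unit and values:
#     q = np.arange(3.0) << u.m
#     q <<= u.km        # q is now [0., 0.001, 0.002] km, same memory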
def test_regression_12964():
# This will fail if the fix to
# https://github.com/astropy/astropy/issues/12964 doesn't work.
x = u.Quantity(10, u.km, dtype=int)
x <<= u.pc
# We add a test that this worked.
assert x.unit is u.pc
assert x.dtype == np.float64
def test_quantity_value_views():
q1 = u.Quantity([1.0, 2.0], unit=u.meter)
# views if the unit is the same.
v1 = q1.value
v1[0] = 0.0
assert np.all(q1 == [0.0, 2.0] * u.meter)
v2 = q1.to_value()
v2[1] = 3.0
assert np.all(q1 == [0.0, 3.0] * u.meter)
v3 = q1.to_value("m")
v3[0] = 1.0
assert np.all(q1 == [1.0, 3.0] * u.meter)
q2 = q1.to("m", copy=False)
q2[0] = 2 * u.meter
assert np.all(q1 == [2.0, 3.0] * u.meter)
v4 = q1.to_value("cm")
v4[0] = 0.0
# copy if different unit.
assert np.all(q1 == [2.0, 3.0] * u.meter)
def test_quantity_conversion_with_equiv():
q1 = u.Quantity(0.1, unit=u.meter)
v2 = q1.to_value(u.Hz, equivalencies=u.spectral())
assert_allclose(v2, 2997924580.0)
q2 = q1.to(u.Hz, equivalencies=u.spectral())
assert_allclose(q2.value, v2)
q1 = u.Quantity(0.4, unit=u.arcsecond)
v2 = q1.to_value(u.au, equivalencies=u.parallax())
q2 = q1.to(u.au, equivalencies=u.parallax())
v3 = q2.to_value(u.arcminute, equivalencies=u.parallax())
q3 = q2.to(u.arcminute, equivalencies=u.parallax())
assert_allclose(v2, 515662.015)
assert_allclose(q2.value, v2)
assert q2.unit == u.au
assert_allclose(v3, 0.0066666667)
assert_allclose(q3.value, v3)
assert q3.unit == u.arcminute
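# Added note (illustrative): instead of passing ``equivalencies=`` per call, a
# set can be enabled for a whole block with the context manager
# ``u.set_enabled_equivalencies`` (or appended to the active set with
# ``u.add_enabled_equivalencies``, as in test_quantity_ilshift above), e.g.
#     with u.set_enabled_equivalencies(u.spectral()):
#         (0.1 * u.m).to(u.Hz)   # ~2.998e9 Hz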
def test_quantity_conversion_equivalency_passed_on():
class MySpectral(u.Quantity):
_equivalencies = u.spectral()
def __quantity_view__(self, obj, unit):
return obj.view(MySpectral)
def __quantity_instance__(self, *args, **kwargs):
return MySpectral(*args, **kwargs)
q1 = MySpectral([1000, 2000], unit=u.Hz)
q2 = q1.to(u.nm)
assert q2.unit == u.nm
q3 = q2.to(u.Hz)
assert q3.unit == u.Hz
assert_allclose(q3.value, q1.value)
q4 = MySpectral([1000, 2000], unit=u.nm)
q5 = q4.to(u.Hz).to(u.nm)
assert q5.unit == u.nm
assert_allclose(q4.value, q5.value)
# Regression test for issue #2315, divide-by-zero error when examining 0*unit
def test_self_equivalency():
assert u.deg.is_equivalent(0 * u.radian)
assert u.deg.is_equivalent(1 * u.radian)
def test_si():
q1 = 10.0 * u.m * u.s**2 / (200.0 * u.ms) ** 2 # 250 meters
assert q1.si.value == 250
assert q1.si.unit == u.m
q = 10.0 * u.m # 10 meters
assert q.si.value == 10
assert q.si.unit == u.m
q = 10.0 / u.m # 10 1 / meters
assert q.si.value == 10
assert q.si.unit == (1 / u.m)
def test_cgs():
q1 = 10.0 * u.cm * u.s**2 / (200.0 * u.ms) ** 2 # 250 centimeters
assert q1.cgs.value == 250
assert q1.cgs.unit == u.cm
q = 10.0 * u.m # 10 meters -> 1000 centimeters in cgs
assert q.cgs.value == 1000
assert q.cgs.unit == u.cm
q = 10.0 / u.cm # 10 1 / centimeters
assert q.cgs.value == 10
assert q.cgs.unit == (1 / u.cm)
q = 10.0 * u.Pa # 10 pascals; 1 Pa = 10 barye, so 100 barye in cgs
assert q.cgs.value == 100
assert q.cgs.unit == u.barye
class TestQuantityComparison:
def test_quantity_equality(self):
assert u.Quantity(1000, unit="m") == u.Quantity(1, unit="km")
assert not (u.Quantity(1, unit="m") == u.Quantity(1, unit="km"))
# for ==, !=, return False, True if units do not match
assert (u.Quantity(1100, unit=u.m) != u.Quantity(1, unit=u.s)) is True
assert (u.Quantity(1100, unit=u.m) == u.Quantity(1, unit=u.s)) is False
assert (u.Quantity(0, unit=u.m) == u.Quantity(0, unit=u.s)) is False
# But allow comparison with 0, +/-inf if latter unitless
assert u.Quantity(0, u.m) == 0.0
assert u.Quantity(1, u.m) != 0.0
assert u.Quantity(1, u.m) != np.inf
assert u.Quantity(np.inf, u.m) == np.inf
def test_quantity_equality_array(self):
a = u.Quantity([0.0, 1.0, 1000.0], u.m)
b = u.Quantity(1.0, u.km)
eq = a == b
ne = a != b
assert np.all(eq == [False, False, True])
assert np.all(eq != ne)
# For mismatched units, we should just get True, False
c = u.Quantity(1.0, u.s)
eq = a == c
ne = a != c
assert eq is False
assert ne is True
# Constants are treated as dimensionless, so False too.
eq = a == 1.0
ne = a != 1.0
assert eq is False
assert ne is True
# But 0 can have any units, so we can compare.
eq = a == 0
ne = a != 0
assert np.all(eq == [True, False, False])
assert np.all(eq != ne)
# But we do not extend that to arrays; they should have the same unit.
d = np.array([0, 1.0, 1000.0])
eq = a == d
ne = a != d
assert eq is False
assert ne is True
def test_quantity_comparison(self):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) < u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) < u.Quantity(1, unit=u.second)
assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) <= u.Quantity(1, unit=u.second)
assert u.Quantity(1200, unit=u.meter) != u.Quantity(1, unit=u.kilometer)
class TestQuantityDisplay:
scalarintq = u.Quantity(1, unit="m", dtype=int)
scalarfloatq = u.Quantity(1.3, unit="m")
arrq = u.Quantity([1, 2.3, 8.9], unit="m")
scalar_complex_q = u.Quantity(complex(1.0, 2.0))
scalar_big_complex_q = u.Quantity(complex(1.0, 2.0e27) * 1e25)
scalar_big_neg_complex_q = u.Quantity(complex(-1.0, -2.0e27) * 1e36)
arr_complex_q = u.Quantity(np.arange(3) * (complex(-1.0, -2.0e27) * 1e36))
big_arr_complex_q = u.Quantity(np.arange(125) * (complex(-1.0, -2.0e27) * 1e36))
def test_dimensionless_quantity_repr(self):
q2 = u.Quantity(1.0, unit="m-1")
q3 = u.Quantity(1, unit="m-1", dtype=int)
assert repr(self.scalarintq * q2) == "<Quantity 1.>"
assert repr(self.arrq * q2) == "<Quantity [1. , 2.3, 8.9]>"
assert repr(self.scalarintq * q3) == "<Quantity 1>"
def test_dimensionless_quantity_str(self):
q2 = u.Quantity(1.0, unit="m-1")
q3 = u.Quantity(1, unit="m-1", dtype=int)
assert str(self.scalarintq * q2) == "1.0"
assert str(self.scalarintq * q3) == "1"
assert str(self.arrq * q2) == "[1. 2.3 8.9]"
def test_dimensionless_quantity_format(self):
q1 = u.Quantity(3.14)
assert format(q1, ".2f") == "3.14"
assert f"{q1:cds}" == "3.14"
def test_scalar_quantity_str(self):
assert str(self.scalarintq) == "1 m"
assert str(self.scalarfloatq) == "1.3 m"
def test_scalar_quantity_repr(self):
assert repr(self.scalarintq) == "<Quantity 1 m>"
assert repr(self.scalarfloatq) == "<Quantity 1.3 m>"
def test_array_quantity_str(self):
assert str(self.arrq) == "[1. 2.3 8.9] m"
def test_array_quantity_repr(self):
assert repr(self.arrq) == "<Quantity [1. , 2.3, 8.9] m>"
def test_scalar_quantity_format(self):
assert format(self.scalarintq, "02d") == "01 m"
assert format(self.scalarfloatq, ".1f") == "1.3 m"
assert format(self.scalarfloatq, ".0f") == "1 m"
assert f"{self.scalarintq:cds}" == "1 m"
assert f"{self.scalarfloatq:cds}" == "1.3 m"
def test_uninitialized_unit_format(self):
bad_quantity = np.arange(10.0).view(u.Quantity)
assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED)
assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + ">")
def test_to_string(self):
qscalar = u.Quantity(1.5e14, "m/s")
# __str__ is the default `format`
assert str(qscalar) == qscalar.to_string()
res = "Quantity as KMS: 150000000000.0 km / s"
assert f"Quantity as KMS: {qscalar.to_string(unit=u.km / u.s)}" == res
# With precision set
res = "Quantity as KMS: 1.500e+11 km / s"
assert (
f"Quantity as KMS: {qscalar.to_string(precision=3, unit=u.km / u.s)}" == res
)
res = r"$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
assert qscalar.to_string(format="latex") == res
assert qscalar.to_string(format="latex", subfmt="inline") == res
res = r"$\displaystyle 1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
assert qscalar.to_string(format="latex", subfmt="display") == res
res = r"$1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$"
assert qscalar.to_string(format="latex_inline") == res
assert qscalar.to_string(format="latex_inline", subfmt="inline") == res
res = r"$\displaystyle 1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$"
assert qscalar.to_string(format="latex_inline", subfmt="display") == res
res = "[0 1 2] (Unit not initialised)"
assert np.arange(3).view(u.Quantity).to_string() == res
def test_repr_latex(self):
from astropy.units.quantity import conf
q2scalar = u.Quantity(1.5e14, "m/s")
assert self.scalarintq._repr_latex_() == r"$1 \; \mathrm{m}$"
assert self.scalarfloatq._repr_latex_() == r"$1.3 \; \mathrm{m}$"
assert (
q2scalar._repr_latex_() == r"$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
)
assert self.arrq._repr_latex_() == r"$[1,~2.3,~8.9] \; \mathrm{m}$"
# Complex quantities
assert self.scalar_complex_q._repr_latex_() == r"$(1+2i) \; \mathrm{}$"
assert (
self.scalar_big_complex_q._repr_latex_()
== r"$(1 \times 10^{25}+2 \times 10^{52}i) \; \mathrm{}$"
)
assert (
self.scalar_big_neg_complex_q._repr_latex_()
== r"$(-1 \times 10^{36}-2 \times 10^{63}i) \; \mathrm{}$"
)
assert self.arr_complex_q._repr_latex_() == (
r"$[(0-0i),~(-1 \times 10^{36}-2 \times 10^{63}i),"
r"~(-2 \times 10^{36}-4 \times 10^{63}i)] \; \mathrm{}$"
)
assert r"\dots" in self.big_arr_complex_q._repr_latex_()
qmed = np.arange(100) * u.m
qbig = np.arange(1000) * u.m
qvbig = np.arange(10000) * 1e9 * u.m
pops = np.get_printoptions()
oldlat = conf.latex_array_threshold
try:
# check precision behavior
q = u.Quantity(987654321.123456789, "m/s")
qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm
np.set_printoptions(precision=8)
assert (
q._repr_latex_() == r"$9.8765432 \times 10^{8} \; \mathrm{\frac{m}{s}}$"
)
assert (
qa._repr_latex_()
== r"$[7.89123,~1.2345679 \times 10^{8},~0] \; \mathrm{cm}$"
)
np.set_printoptions(precision=2)
assert q._repr_latex_() == r"$9.9 \times 10^{8} \; \mathrm{\frac{m}{s}}$"
assert qa._repr_latex_() == r"$[7.9,~1.2 \times 10^{8},~0] \; \mathrm{cm}$"
# check thresholding behavior
conf.latex_array_threshold = 100 # should be default
lsmed = qmed._repr_latex_()
assert r"\dots" not in lsmed
lsbig = qbig._repr_latex_()
assert r"\dots" in lsbig
lsvbig = qvbig._repr_latex_()
assert r"\dots" in lsvbig
conf.latex_array_threshold = 1001
lsmed = qmed._repr_latex_()
assert r"\dots" not in lsmed
lsbig = qbig._repr_latex_()
assert r"\dots" not in lsbig
lsvbig = qvbig._repr_latex_()
assert r"\dots" in lsvbig
conf.latex_array_threshold = -1 # means use the numpy threshold
np.set_printoptions(threshold=99)
lsmed = qmed._repr_latex_()
assert r"\dots" in lsmed
lsbig = qbig._repr_latex_()
assert r"\dots" in lsbig
lsvbig = qvbig._repr_latex_()
assert r"\dots" in lsvbig
assert lsvbig.endswith(",~1 \\times 10^{13}] \\; \\mathrm{m}$")
finally:
# prevent side-effects from influencing other tests
np.set_printoptions(**pops)
conf.latex_array_threshold = oldlat
qinfnan = [np.inf, -np.inf, np.nan] * u.m
assert qinfnan._repr_latex_() == r"$[\infty,~-\infty,~{\rm NaN}] \; \mathrm{m}$"
def test_decompose():
q1 = 5 * u.N
assert q1.decompose() == (5 * u.kg * u.m * u.s**-2)
def test_decompose_regression():
"""
Regression test for bug #1163
If decompose was called multiple times on a Quantity with an array and a
scale != 1, the result changed every time. This is because the value was
being referenced not copied, then modified, which changed the original
value.
"""
q = np.array([1, 2, 3]) * u.m / (2.0 * u.km)
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
assert np.all(q == np.array([1, 2, 3]) * u.m / (2.0 * u.km))
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
def test_arrays():
"""
Test using quantities with array values
"""
qsec = u.Quantity(np.arange(10), u.second)
assert isinstance(qsec.value, np.ndarray)
assert not qsec.isscalar
# len and indexing should work for arrays
assert len(qsec) == len(qsec.value)
qsecsub25 = qsec[2:5]
assert qsecsub25.unit == qsec.unit
assert isinstance(qsecsub25, u.Quantity)
assert len(qsecsub25) == 3
# make sure isscalar, len, and indexing behave correctly for non-arrays.
qsecnotarray = u.Quantity(10.0, u.second)
assert qsecnotarray.isscalar
with pytest.raises(TypeError):
len(qsecnotarray)
with pytest.raises(TypeError):
qsecnotarray[0]
qseclen0array = u.Quantity(np.array(10), u.second, dtype=int)
# 0d numpy array should act basically like a scalar
assert qseclen0array.isscalar
with pytest.raises(TypeError):
len(qseclen0array)
with pytest.raises(TypeError):
qseclen0array[0]
assert isinstance(qseclen0array.value, numbers.Integral)
a = np.array(
[(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)],
dtype=[("x", float), ("y", float), ("z", float)],
)
qkpc = u.Quantity(a, u.kpc)
assert not qkpc.isscalar
qkpc0 = qkpc[0]
assert qkpc0.value == a[0]
assert qkpc0.unit == qkpc.unit
assert isinstance(qkpc0, u.Quantity)
assert qkpc0.isscalar
qkpcx = qkpc["x"]
assert np.all(qkpcx.value == a["x"])
assert qkpcx.unit == qkpc.unit
assert isinstance(qkpcx, u.Quantity)
assert not qkpcx.isscalar
qkpcx1 = qkpc["x"][1]
assert qkpcx1.unit == qkpc.unit
assert isinstance(qkpcx1, u.Quantity)
assert qkpcx1.isscalar
qkpc1x = qkpc[1]["x"]
assert qkpc1x.isscalar
assert qkpc1x == qkpcx1
# can also create from lists, will auto-convert to arrays
qsec = u.Quantity(list(range(10)), u.second)
assert isinstance(qsec.value, np.ndarray)
# quantity math should work with arrays
assert_array_equal((qsec * 2).value, (np.arange(10) * 2))
assert_array_equal((qsec / 2).value, (np.arange(10) / 2))
# quantity addition/subtraction should *not* work with plain arrays,
# because the unit of the bare numbers would be ambiguous
with pytest.raises(u.UnitsError):
assert_array_equal((qsec + 2).value, (np.arange(10) + 2))
with pytest.raises(u.UnitsError):
assert_array_equal((qsec - 2).value, (np.arange(10) + 2))
# should create by unit multiplication, too
qsec2 = np.arange(10) * u.second
qsec3 = u.second * np.arange(10)
assert np.all(qsec == qsec2)
assert np.all(qsec2 == qsec3)
# make sure numerical-converters fail when arrays are present
with pytest.raises(TypeError):
float(qsec)
with pytest.raises(TypeError):
int(qsec)
def test_array_indexing_slicing():
q = np.array([1.0, 2.0, 3.0]) * u.m
assert q[0] == 1.0 * u.m
assert np.all(q[0:2] == u.Quantity([1.0, 2.0], u.m))
def test_array_setslice():
q = np.array([1.0, 2.0, 3.0]) * u.m
q[1:2] = np.array([400.0]) * u.cm
assert np.all(q == np.array([1.0, 4.0, 3.0]) * u.m)
def test_inverse_quantity():
"""
Regression test from issue #679
"""
q = u.Quantity(4.0, u.meter / u.second)
qot = q / 2
toq = 2 / q
npqot = q / np.array(2)
assert npqot.value == 2.0
assert npqot.unit == (u.meter / u.second)
assert qot.value == 2.0
assert qot.unit == (u.meter / u.second)
assert toq.value == 0.5
assert toq.unit == (u.second / u.meter)
def test_quantity_mutability():
q = u.Quantity(9.8, u.meter / u.second / u.second)
with pytest.raises(AttributeError):
q.value = 3
with pytest.raises(AttributeError):
q.unit = u.kg
def test_quantity_initialized_with_quantity():
q1 = u.Quantity(60, u.second)
q2 = u.Quantity(q1, u.minute)
assert q2.value == 1
q3 = u.Quantity([q1, q2], u.second)
assert q3[0].value == 60
assert q3[1].value == 60
q4 = u.Quantity([q2, q1])
assert q4.unit == q2.unit
assert q4[0].value == 1
assert q4[1].value == 1
def test_quantity_string_unit():
q1 = 1.0 * u.m / "s"
assert q1.value == 1
assert q1.unit == (u.m / u.s)
q2 = q1 * "m"
assert q2.unit == ((u.m * u.m) / u.s)
def test_quantity_invalid_unit_string():
with pytest.raises(ValueError):
"foo" * u.m
def test_implicit_conversion():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
assert_allclose(q.centimeter, 100)
assert_allclose(q.cm, 100)
assert_allclose(q.parsec, 3.240779289469756e-17)
def test_implicit_conversion_autocomplete():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
q.foo = 42
attrs = dir(q)
assert "centimeter" in attrs
assert "cm" in attrs
assert "parsec" in attrs
assert "foo" in attrs
assert "to" in attrs
assert "value" in attrs
# Something from the base class, object
assert "__setattr__" in attrs
with pytest.raises(AttributeError):
q.l
def test_quantity_iterability():
"""Regressiont est for issue #878.
Scalar quantities should not be iterable and should raise a type error on
iteration.
"""
q1 = [15.0, 17.0] * u.m
assert isiterable(q1)
q2 = next(iter(q1))
assert q2 == 15.0 * u.m
assert not isiterable(q2)
pytest.raises(TypeError, iter, q2)
def test_copy():
q1 = u.Quantity(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), unit=u.m)
q2 = q1.copy()
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
q3 = q1.copy(order="F")
assert q3.flags["F_CONTIGUOUS"]
assert np.all(q1.value == q3.value)
assert q1.unit == q3.unit
assert q1.dtype == q3.dtype
assert q1.value is not q3.value
q4 = q1.copy(order="C")
assert q4.flags["C_CONTIGUOUS"]
assert np.all(q1.value == q4.value)
assert q1.unit == q4.unit
assert q1.dtype == q4.dtype
assert q1.value is not q4.value
def test_deepcopy():
q1 = u.Quantity(np.array([1.0, 2.0, 3.0]), unit=u.m)
q2 = copy.deepcopy(q1)
assert isinstance(q2, u.Quantity)
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
def test_equality_numpy_scalar():
"""
A regression test to ensure that numpy scalars are correctly compared
(which originally failed due to the lack of ``__array_priority__``).
"""
assert 10 != 10.0 * u.m
assert np.int64(10) != 10 * u.m
assert 10 * u.m != np.int64(10)
def test_quantity_pickleability():
"""
Testing pickleability of quantity
"""
q1 = np.arange(10) * u.m
q2 = pickle.loads(pickle.dumps(q1))
assert np.all(q1.value == q2.value)
assert q1.unit.is_equivalent(q2.unit)
assert q1.unit == q2.unit
def test_quantity_initialisation_from_string():
q = u.Quantity("1")
assert q.unit == u.dimensionless_unscaled
assert q.value == 1.0
q = u.Quantity("1.5 m/s")
assert q.unit == u.m / u.s
assert q.value == 1.5
assert u.Unit(q) == u.Unit("1.5 m/s")
q = u.Quantity(".5 m")
assert q == u.Quantity(0.5, u.m)
q = u.Quantity("-1e1km")
assert q == u.Quantity(-10, u.km)
q = u.Quantity("-1e+1km")
assert q == u.Quantity(-10, u.km)
q = u.Quantity("+.5km")
assert q == u.Quantity(0.5, u.km)
q = u.Quantity("+5e-1km")
assert q == u.Quantity(0.5, u.km)
q = u.Quantity("5", u.m)
assert q == u.Quantity(5.0, u.m)
q = u.Quantity("5 km", u.m)
assert q.value == 5000.0
assert q.unit == u.m
q = u.Quantity("5Em")
assert q == u.Quantity(5.0, u.Em)
with pytest.raises(TypeError):
u.Quantity("")
with pytest.raises(TypeError):
u.Quantity("m")
with pytest.raises(TypeError):
u.Quantity("1.2.3 deg")
with pytest.raises(TypeError):
u.Quantity("1+deg")
with pytest.raises(TypeError):
u.Quantity("1-2deg")
with pytest.raises(TypeError):
u.Quantity("1.2e-13.3m")
with pytest.raises(TypeError):
u.Quantity(["5"])
with pytest.raises(TypeError):
u.Quantity(np.array(["5"]))
with pytest.raises(ValueError):
u.Quantity("5E")
with pytest.raises(ValueError):
u.Quantity("5 foo")
def test_unsupported():
q1 = np.arange(10) * u.m
with pytest.raises(TypeError):
np.bitwise_and(q1, q1)
def test_unit_identity():
q = 1.0 * u.hour
assert q.unit is u.hour
def test_quantity_to_view():
q1 = np.array([1000, 2000]) * u.m
q2 = q1.to(u.km)
assert q1.value[0] == 1000
assert q2.value[0] == 1
def test_quantity_tuple_power():
with pytest.raises(ValueError):
(5.0 * u.m) ** (1, 2)
def test_quantity_fraction_power():
q = (25.0 * u.m**2) ** Fraction(1, 2)
assert q.value == 5.0
assert q.unit == u.m
# Regression check to ensure we didn't create an object type by raising
# the value of the quantity to a Fraction. [#3922]
assert q.dtype.kind == "f"
def test_quantity_from_table():
"""
Checks that units from tables are respected when converted to a Quantity.
This also generically checks the use of *anything* with a `unit` attribute
passed into Quantity
"""
from astropy.table import Table
t = Table(data=[np.arange(5), np.arange(5)], names=["a", "b"])
t["a"].unit = u.kpc
qa = u.Quantity(t["a"])
assert qa.unit == u.kpc
assert_array_equal(qa.value, t["a"])
qb = u.Quantity(t["b"])
assert qb.unit == u.dimensionless_unscaled
assert_array_equal(qb.value, t["b"])
# This does *not* auto-convert, because it's not necessarily obvious that's
# desired. Instead we revert to standard `Quantity` behavior
qap = u.Quantity(t["a"], u.pc)
assert qap.unit == u.pc
assert_array_equal(qap.value, t["a"] * 1000)
qbp = u.Quantity(t["b"], u.pc)
assert qbp.unit == u.pc
assert_array_equal(qbp.value, t["b"])
# Also check with a function unit (regression test for gh-8430)
t["a"].unit = u.dex(u.cm / u.s**2)
fq = u.Dex(t["a"])
assert fq.unit == u.dex(u.cm / u.s**2)
assert_array_equal(fq.value, t["a"])
fq2 = u.Quantity(t["a"], subok=True)
assert isinstance(fq2, u.Dex)
assert fq2.unit == u.dex(u.cm / u.s**2)
assert_array_equal(fq2.value, t["a"])
with pytest.raises(u.UnitTypeError):
u.Quantity(t["a"])
def test_assign_slice_with_quantity_like():
# Regression tests for gh-5961
from astropy.table import Column, Table
# first check directly that we can use a Column to assign to a slice.
c = Column(np.arange(10.0), unit=u.mm)
q = u.Quantity(c)
q[:2] = c[:2]
# next check that we do not fail the original problem.
t = Table()
t["x"] = np.arange(10) * u.mm
t["y"] = np.ones(10) * u.mm
assert type(t["x"]) is Column
xy = np.vstack([t["x"], t["y"]]).T * u.mm
ii = [0, 2, 4]
assert xy[ii, 0].unit == t["x"][ii].unit
# should not raise anything
xy[ii, 0] = t["x"][ii]
def test_insert():
"""
Test Quantity.insert method. This does not test the full capabilities
of the underlying np.insert, but hits the key functionality for
Quantity.
"""
q = [1, 2] * u.m
# Insert a compatible float with different units
q2 = q.insert(0, 1 * u.km)
assert np.all(q2.value == [1000, 1, 2])
assert q2.unit is u.m
assert q2.dtype.kind == "f"
if minversion(np, "1.8.0"):
q2 = q.insert(1, [1, 2] * u.km)
assert np.all(q2.value == [1, 1000, 2000, 2])
assert q2.unit is u.m
# Cannot convert 1.5 * u.s to m
with pytest.raises(u.UnitsError):
q.insert(1, 1.5 * u.s)
# Tests with multi-dim quantity
q = [[1, 2], [3, 4]] * u.m
q2 = q.insert(1, [10, 20] * u.m, axis=0)
assert np.all(q2.value == [[1, 2], [10, 20], [3, 4]])
q2 = q.insert(1, [10, 20] * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2], [3, 20, 4]])
q2 = q.insert(1, 10 * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2], [3, 10, 4]])
def test_repr_array_of_quantity():
"""
Test print/repr of object arrays of Quantity objects with different
units.
Regression test for the issue first reported in
https://github.com/astropy/astropy/issues/3777
"""
a = np.array([1 * u.m, 2 * u.s], dtype=object)
assert repr(a) == "array([<Quantity 1. m>, <Quantity 2. s>], dtype=object)"
assert str(a) == "[<Quantity 1. m> <Quantity 2. s>]"
class TestSpecificTypeQuantity:
def setup_method(self):
class Length(u.SpecificTypeQuantity):
_equivalent_unit = u.m
class Length2(Length):
_default_unit = u.m
class Length3(Length):
_unit = u.m
self.Length = Length
self.Length2 = Length2
self.Length3 = Length3
def test_creation(self):
l = self.Length(np.arange(10.0) * u.km)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.0) * u.hour)
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.0))
l2 = self.Length2(np.arange(5.0))
assert type(l2) is self.Length2
assert l2._default_unit is self.Length2._default_unit
with pytest.raises(u.UnitTypeError):
self.Length3(np.arange(10.0))
def test_view(self):
l = (np.arange(5.0) * u.km).view(self.Length)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
(np.arange(5.0) * u.s).view(self.Length)
v = np.arange(5.0).view(self.Length)
assert type(v) is self.Length
assert v._unit is None
l3 = np.ones((2, 2)).view(self.Length3)
assert type(l3) is self.Length3
assert l3.unit is self.Length3._unit
def test_operation_precedence_and_fallback(self):
l = self.Length(np.arange(5.0) * u.cm)
sum1 = l + 1.0 * u.m
assert type(sum1) is self.Length
sum2 = 1.0 * u.km + l
assert type(sum2) is self.Length
sum3 = l + l
assert type(sum3) is self.Length
res1 = l * (1.0 * u.m)
assert type(res1) is u.Quantity
res2 = l * l
assert type(res2) is u.Quantity
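# Added note on the tests above (illustrative): a SpecificTypeQuantity subclass
# is preserved only while the result is still equivalent to its declared unit
# (sums of lengths stay Length); once the physical type changes, as for the
# products, the result falls back to a plain Quantity.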
def test_unit_class_override():
class MyQuantity(u.Quantity):
pass
my_unit = u.Unit("my_deg", u.deg)
my_unit._quantity_class = MyQuantity
q1 = u.Quantity(1.0, my_unit)
assert type(q1) is u.Quantity
q2 = u.Quantity(1.0, my_unit, subok=True)
assert type(q2) is MyQuantity
class QuantityMimic:
def __init__(self, value, unit):
self.value = value
self.unit = unit
def __array__(self):
return np.array(self.value)
class QuantityMimic2(QuantityMimic):
def to(self, unit):
return u.Quantity(self.value, self.unit).to(unit)
def to_value(self, unit):
return u.Quantity(self.value, self.unit).to_value(unit)
class TestQuantityMimics:
"""Test Quantity Mimics that are not ndarray subclasses."""
@pytest.mark.parametrize("Mimic", (QuantityMimic, QuantityMimic2))
def test_mimic_input(self, Mimic):
value = np.arange(10.0)
mimic = Mimic(value, u.m)
q = u.Quantity(mimic)
assert q.unit == u.m
assert np.all(q.value == value)
q2 = u.Quantity(mimic, u.cm)
assert q2.unit == u.cm
assert np.all(q2.value == 100 * value)
@pytest.mark.parametrize("Mimic", (QuantityMimic, QuantityMimic2))
def test_mimic_setting(self, Mimic):
mimic = Mimic([1.0, 2.0], u.m)
q = u.Quantity(np.arange(10.0), u.cm)
q[8:] = mimic
assert np.all(q[:8].value == np.arange(8.0))
assert np.all(q[8:].value == [100.0, 200.0])
def test_mimic_function_unit(self):
mimic = QuantityMimic([1.0, 2.0], u.dex(u.cm / u.s**2))
d = u.Dex(mimic)
assert isinstance(d, u.Dex)
assert d.unit == u.dex(u.cm / u.s**2)
assert np.all(d.value == [1.0, 2.0])
q = u.Quantity(mimic, subok=True)
assert isinstance(q, u.Dex)
assert q.unit == u.dex(u.cm / u.s**2)
assert np.all(q.value == [1.0, 2.0])
with pytest.raises(u.UnitTypeError):
u.Quantity(mimic)
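# Added note (illustrative): the mimics show the duck-typing contract used by
# the Quantity constructor -- an object exposing a ``unit`` attribute plus
# array-like data (``__array__``) is enough; ``to``/``to_value`` methods, as on
# QuantityMimic2, simply mirror more of the real Quantity interface.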
def test_masked_quantity_str_repr():
"""Ensure we don't break masked Quantity representation."""
# Really, masked quantities do not work well, but at least let the
# basics work.
masked_quantity = np.ma.array([1, 2, 3, 4] * u.kg, mask=[True, False, True, False])
str(masked_quantity)
repr(masked_quantity)
class TestQuantitySubclassAboveAndBelow:
@classmethod
def setup_class(self):
class MyArray(np.ndarray):
def __array_finalize__(self, obj):
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
if hasattr(obj, "my_attr"):
self.my_attr = obj.my_attr
self.MyArray = MyArray
self.MyQuantity1 = type("MyQuantity1", (u.Quantity, MyArray), dict(my_attr="1"))
self.MyQuantity2 = type("MyQuantity2", (MyArray, u.Quantity), dict(my_attr="2"))
def test_setup(self):
mq1 = self.MyQuantity1(10, u.m)
assert isinstance(mq1, self.MyQuantity1)
assert mq1.my_attr == "1"
assert mq1.unit is u.m
mq2 = self.MyQuantity2(10, u.m)
assert isinstance(mq2, self.MyQuantity2)
assert mq2.my_attr == "2"
assert mq2.unit is u.m
def test_attr_propagation(self):
mq1 = self.MyQuantity1(10, u.m)
mq12 = self.MyQuantity2(mq1)
assert isinstance(mq12, self.MyQuantity2)
assert not isinstance(mq12, self.MyQuantity1)
assert mq12.my_attr == "1"
assert mq12.unit is u.m
mq2 = self.MyQuantity2(10, u.m)
mq21 = self.MyQuantity1(mq2)
assert isinstance(mq21, self.MyQuantity1)
assert not isinstance(mq21, self.MyQuantity2)
assert mq21.my_attr == "2"
assert mq21.unit is u.m
|
e85657d52a0837c636d8a43ff98f4c2ce7bb6a5859aabfcca43af8b64f867679 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import itertools
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED,
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
IGNORED_FUNCTIONS,
SUBCLASS_SAFE_FUNCTIONS,
TBD_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
def get_wrapped_functions(*modules):
wrapped_functions = {}
for mod in modules:
for name, f in mod.__dict__.items():
if f is np.printoptions or name.startswith("_"):
continue
if callable(f) and hasattr(f, "__wrapped__"):
wrapped_functions[name] = f
return wrapped_functions
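# Added note (illustrative): functions dispatched through __array_function__
# are wrappers and therefore expose ``__wrapped__`` (NumPy >= 1.17), which is
# what the helper above keys on; ``np.printoptions`` is skipped explicitly
# because its contextlib wrapper also carries ``__wrapped__`` without being an
# array function.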
all_wrapped_functions = get_wrapped_functions(
np, np.fft, np.linalg, np.lib.recfunctions
)
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
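# Added note (illustrative): any method named ``test_<name>`` defined in a class
# using this metaclass marks the wrapped NumPy function ``<name>`` as covered in
# ``CoverageMeta.covered``, so that the set can be compared against
# ``all_wrapped`` to find untested functions.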
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup_method(self):
self.q = np.arange(9.0).reshape(3, 3) / 4.0 * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1.0 * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1.0 * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1.0 * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
self.check(np.broadcast_to, (3, 3, 3), subok=True)
out = np.broadcast_to(self.q, (3, 3, 3))
assert type(out) is np.ndarray # NOT Quantity
def test_broadcast_arrays(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
a1, a2 = np.broadcast_arrays(self.q, q2)
assert type(a1) is np.ndarray
assert type(a2) is np.ndarray
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150.0, 350.0]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@needs_array_function
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices, axis=0) * self.q.unit
assert np.all(out == expected)
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis, self.q.value) * self.q.unit**2
assert_array_equal(out, expected)
@needs_array_function
@pytest.mark.parametrize("axes", ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup_method(self):
self.q = (np.arange(9.0).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@needs_array_function
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@needs_array_function
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.0)
def test_ones_like(self):
self.check(np.ones_like)
@needs_array_function
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
@needs_array_function
def test_diag_1d_input(self):
# Also check 1-D case; drops unit w/o __array_function__.
q = self.q.ravel()
o = np.diag(q)
expected = np.diag(q.value) << q.unit
assert o.unit == self.q.unit
assert o.shape == expected.shape
assert_array_equal(o, expected)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value, axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True], self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.0) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@needs_array_function
def test_putmask(self):
q = np.arange(3.0) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.0)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_place(self):
q = np.arange(3.0) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_copyto(self):
q = np.arange(3.0) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.0).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25.0 * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@needs_array_function
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup_method(self):
self.q1 = np.arange(6.0).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop("q_list", [self.q1, self.q2])
q_ref = kwargs.pop("q_ref", q_list[0])
o = func(q_list, *args, **kwargs)
v_list = [q_ref._to_own_unit(q) for q in q_list]
expected = func(v_list, *args, **kwargs) * q_ref.unit
assert o.shape == expected.shape
assert np.all(o == expected)
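# Added note on check() above (illustrative): the private helper
# Quantity._to_own_unit(q) returns q converted to the reference quantity's unit
# as a bare array, which supplies the plain-ndarray inputs for the expected
# result.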
@needs_array_function
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
# regression test for gh-13322.
self.check(np.concatenate, dtype="f4")
self.check(
np.concatenate,
q_list=[np.zeros(self.q1.shape), self.q1, self.q2],
q_ref=self.q1,
)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = (
np.concatenate([self.q1.value, self.q2.to_value(self.q1.unit)])
* self.q1.unit
)
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@needs_array_function
def test_stack(self):
self.check(np.stack)
@needs_array_function
def test_column_stack(self):
self.check(np.column_stack)
@needs_array_function
def test_hstack(self):
self.check(np.hstack)
@needs_array_function
def test_vstack(self):
self.check(np.vstack)
@needs_array_function
def test_dstack(self):
self.check(np.dstack)
@needs_array_function
def test_block(self):
self.check(np.block)
result = np.block([[0.0, 1.0 * u.m], [1.0 * u.cm, 2.0 * u.km]])
assert np.all(result == np.block([[0, 1.0], [0.01, 2000.0]]) << u.m)
@needs_array_function
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = (
np.append(self.q1.value, self.q2.to_value(self.q1.unit), axis=0)
* self.q1.unit
)
assert np.all(out == expected)
a = np.arange(3.0)
result = np.append(a, 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@needs_array_function
def test_insert(self):
# Unit of inserted values is not ignored.
q = np.arange(12.0).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50.0, 25.0] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) << q.unit
assert np.all(out == expected)
# 0 can have any unit.
out2 = np.insert(q, (3, 5), 0)
expected2 = np.insert(q.value, (3, 5), 0) << q.unit
assert np.all(out2 == expected2)
a = np.arange(3.0)
result = np.insert(a, (2,), 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50.0 * u.cm)
with pytest.raises(u.UnitsError):
np.insert(q, (3, 5), 0.0 * u.s)
@needs_array_function
def test_pad(self):
q = np.arange(1.0, 6.0) * u.m
out = np.pad(q, (2, 3), "constant", constant_values=(0.0, 150.0 * u.cm))
assert out.unit == q.unit
expected = (
np.pad(q.value, (2, 3), "constant", constant_values=(0.0, 1.5)) * q.unit
)
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), "constant", constant_values=150.0 * u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), "constant", constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), "linear_ramp", end_values=(25.0 * u.cm, 0.0))
assert out3.unit == q.unit
expected3 = (
np.pad(q.value, (2, 3), "linear_ramp", end_values=(0.25, 0.0)) * q.unit
)
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.arange(54.0).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(TypeError):
np.any(self.q)
def test_all(self):
with pytest.raises(TypeError):
np.all(self.q)
def test_sometrue(self):
with pytest.raises(TypeError):
np.sometrue(self.q)
def test_alltrue(self):
with pytest.raises(TypeError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
def test_angle(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0.0, 10.0, 20.0]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
unit = self.q.unit
expected = (
np.clip(self.q.value, qmin.to_value(unit), qmax.to_value(unit)) * unit
)
assert np.all(out == expected)
@needs_array_function
def test_sinc(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.0 * u.one)
@needs_array_function
def test_where(self):
out = np.where([True, False, True], self.q, 1.0 * u.km)
expected = np.where([True, False, True], self.q.value, 1000.0) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
        # result is 2x3x5; out[0, :, :] is broadcast from q1, out[1, :, :] from q2
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@needs_array_function
def test_select(self):
q = self.q
out = np.select(
[q < 0.55 * u.m, q > 1.0 * u.m], [q, q.to(u.cm)], default=-1.0 * u.km
)
expected = (
np.select([q.value < 0.55, q.value > 1], [q.value, q.value], default=-1000)
* u.m
)
assert np.all(out == expected)
@needs_array_function
def test_real_if_close(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_tril(self):
self.check(np.tril)
@needs_array_function
def test_triu(self):
self.check(np.triu)
@needs_array_function
def test_unwrap(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1 * u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1.0, 2.0] * u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.0 * u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@needs_array_function
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q, nan=1.0 * u.km, posinf=2.0 * u.km, neginf=-2 * u.km)
expected = [-2000.0, 2000.0, 1000.0, 3.0, 4.0] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1.0 + 1j] * u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1.0 + 1j] * u.m)
def test_isclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 102.0, 199.0]) * u.cm
atol = 1.5 * u.cm
rtol = 1.0 * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(
q1.value, q2.to_value(q1.unit), atol=atol.to_value(q1.unit)
)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
@needs_array_function
def test_allclose_atol_default_unit(self):
q_cm = self.q.to(u.cm)
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 198.0]) * u.cm
out = np.isclose(q1, q2, atol=0.011, rtol=0)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0.011, rtol=0)
assert np.all(out == expected)
out2 = np.isclose(q2, q1, atol=0.011, rtol=0)
expected2 = np.isclose(q2.value, q1.to_value(q2.unit), atol=0.011, rtol=0)
assert np.all(out2 == expected2)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit**2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
@needs_array_function
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@needs_array_function
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@needs_array_function
def test_count_nonzero(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
        # With an axis argument, a plain integer ndarray is returned, not a Quantity.
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
atol = 2 * u.cm
rtol = 1.0 * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0.0, rtol=rtol)
@needs_array_function
def test_allclose_atol_default_unit(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
assert np.allclose(q1, q2, atol=0.011, rtol=0)
assert not np.allclose(q2, q1, atol=0.011, rtol=0)
def test_allclose_failures(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2 * u.s, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1.0 * u.s)
@needs_array_function
def test_array_equal(self):
q1 = np.arange(3.0) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@pytest.mark.parametrize("equal_nan", [False, True])
def test_array_equal_nan(self, equal_nan):
q1 = np.linspace(0, 1, num=11) * u.m
q1[0] = np.nan
q2 = q1.to(u.cm)
result = np.array_equal(q1, q2, equal_nan=equal_nan)
assert result == equal_nan
def test_array_equal_incompatible_units(self):
assert not np.array_equal([1, 2] * u.m, [1, 2] * u.s)
@needs_array_function
def test_array_equiv(self):
q1 = np.array([[0.0, 1.0, 2.0]] * 3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
def test_array_equiv_incompatible_units(self):
assert not np.array_equiv([1, 1] * u.m, [1] * u.s)
class TestNanFunctions(InvariantUnitTestSetup):
def setup_method(self):
super().setup_method()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit**2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@needs_array_function
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@needs_array_function
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@needs_array_function
def test_cross(self):
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@needs_array_function
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@needs_array_function
def test_dot(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.dot(q1, q2)
assert o == 32.0 * u.m / u.s
@needs_array_function
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32.0 + 0j) * u.m / u.s
@needs_array_function
def test_tensordot(self):
# From the docstring example
a = np.arange(60.0).reshape(3, 4, 5) * u.m
b = np.arange(24.0).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value, axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@needs_array_function
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_einsum(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum("...i", q1)
assert np.all(o == q1)
o = np.einsum("ii", q1)
expected = np.einsum("ii", q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum("ij,jk", q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum("ij,jk", q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum_path("...i", q1)
assert o[0] == ["einsum_path", (0,)]
o = np.einsum_path("ii", q1)
assert o[0] == ["einsum_path", (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path("ij,jk", q1, q2)
assert o[0] == ["einsum_path", (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.0) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10.0 * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.0) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.0) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_diff_prepend_append(self):
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km)
expected = np.diff(x.value, prepend=-0.125, append=1000.0) * x.unit
assert np.all(out == expected)
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km, n=2)
expected = np.diff(x.value, prepend=-0.125, append=1000.0, n=2) * x.unit
assert np.all(out == expected)
with pytest.raises(TypeError):
np.diff(x, prepend=object())
def test_gradient(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
out = np.gradient(x)
expected = np.gradient(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_gradient_spacing(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
spacing = 10.0 * u.s
out = np.gradient(x, spacing)
expected = np.gradient(x.value, spacing.value) * (x.unit / spacing.unit)
assert np.all(out == expected)
f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
dx = 2.0 * u.s
y = [1.0, 1.5, 3.5] * u.GHz
dfdx, dfdy = np.gradient(f, dx, y)
exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
exp_dfdx = exp_dfdx * f.unit / dx.unit
exp_dfdy = exp_dfdy * f.unit / y.unit
assert np.all(dfdx == exp_dfdx)
assert np.all(dfdy == exp_dfdy)
dfdx2 = np.gradient(f, dx, axis=0)
assert np.all(dfdx2 == exp_dfdx)
dfdy2 = np.gradient(f, y, axis=(1,))
assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
def test_linspace(self):
        # Note: linspace takes the unit of the end (stop) point, which is not super logical.
out = np.linspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.linspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.linspace(q1, q2, 5)
expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
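    # A brief worked example of the note above (illustrative only):
    # np.linspace(1000.0 * u.m, 10.0 * u.km, 5) comes out as
    # [1., 3.25, 5.5, 7.75, 10.] km, i.e., in the unit of the stop value.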
@needs_array_function
def test_logspace(self):
unit = u.m / u.s**2
out = np.logspace(10.0 * u.dex(unit), 20 * u.dex(unit), 10)
expected = np.logspace(10.0, 20.0, 10) * unit
assert np.all(out == expected)
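        # For ST magnitudes, a step of 1 mag corresponds to a factor 10**(-0.4) in flux.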
out = np.logspace(10.0 * u.STmag, 20 * u.STmag, 10)
expected = np.logspace(10.0, 20.0, 10, base=10.0 ** (-0.4)) * u.ST
assert u.allclose(out, expected)
@needs_array_function
def test_geomspace(self):
out = np.geomspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.geomspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(1.0, 7.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.geomspace(q1, q2, 5)
expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
@needs_array_function
def test_interp(self):
x = np.array([1250.0, 2750.0]) * u.m
xp = np.arange(5.0) * u.km
yp = np.arange(5.0) * u.day
out = np.interp(x, xp, yp)
expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
assert np.all(out == expected)
out = np.interp(x, xp, yp.value)
assert type(out) is np.ndarray
assert np.all(out == expected.value)
@needs_array_function
def test_piecewise(self):
x = np.linspace(-2.5, 2.5, 6) * u.m
out = np.piecewise(x, [x < 0, x >= 0], [-1 * u.s, 1 * u.day])
expected = (
np.piecewise(x.value, [x.value < 0, x.value >= 0], [-1, 24 * 3600]) * u.s
)
assert out.unit == expected.unit
assert np.all(out == expected)
out2 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [-1 * u.s, 1 * u.day, lambda x: 1 * u.hour]
)
expected2 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [-1, 24 * 3600, 3600])
* u.s
)
assert out2.unit == expected2.unit
assert np.all(out2 == expected2)
out3 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [0, 1 * u.percent, lambda x: 1 * u.one]
)
expected3 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [0, 0.01, 1]) * u.one
)
assert out3.unit == expected3.unit
assert np.all(out3 == expected3)
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x, [x], [0.0])
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x.value, [x], [0.0])
class TestBincountDigitize(metaclass=CoverageMeta):
@needs_array_function
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
weights = np.arange(len(i)) * u.Jy
out = np.bincount(i, weights)
expected = np.bincount(i, weights.value) * weights.unit
assert_array_equal(out, expected)
with pytest.raises(TypeError):
np.bincount(weights)
@needs_array_function
def test_digitize(self):
x = np.array([1500.0, 2500.0, 4500.0]) * u.m
bins = np.arange(10.0) * u.km
out = np.digitize(x, bins)
expected = np.digitize(x.to_value(bins.unit), bins.value)
assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(
self,
function,
*args,
value_args=None,
value_kwargs=None,
expected_units=None,
**kwargs
):
"""Check quanties are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the argument can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
        defaults, i.e., non-quantity arguments do not have to be repeated.
        (A brief illustrative sketch of this comparison follows the method.)
        """
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
        # Check bin edges. Here, histogramdd returns an iterable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(
out[bin_slice], expected[bin_slice], expected_units[bin_slice]
):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
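    # A minimal illustrative sketch of what ``check`` compares for a plain
    # histogram call (added for clarity; not part of the original suite).
    @needs_array_function
    def test_histogram_check_sketch(self):
        x = self.x
        out_h, out_edges = np.histogram(x)          # Quantity-aware call
        exp_h, exp_edges = np.histogram(x.value)    # plain-value call
        # expected_units=(None, x.unit) in ``check`` means: the counts come
        # back as a bare array, while the bin edges carry the unit of x.
        assert_array_equal(out_h, exp_h)
        assert_array_equal(out_edges, exp_edges * x.unit)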
@needs_array_function
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(
np.histogram, x, value_args=(x.value,), expected_units=(None, x.unit)
)
# With bins.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
value_args=(x.value, [1.25, 2.0]),
expected_units=(None, x.unit),
)
# With density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
density=True,
value_args=(x.value, [1.25, 2.0]),
expected_units=(1 / x.unit, x.unit),
)
# With weights.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit),
)
# With weights and density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
density=True,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit / x.unit, x.unit),
)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@needs_array_function
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.0]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
@needs_array_function
def test_histogram2d(self):
x, y = self.x, self.y
weights = self.weights
# Basic tests with X, Y.
self.check(
np.histogram2d,
x,
y,
value_args=(x.value, y.value),
expected_units=(None, x.unit, y.unit),
)
# Check units with density.
self.check(
np.histogram2d,
x,
y,
density=True,
value_args=(x.value, y.value),
expected_units=(1 / (x.unit * y.unit), x.unit, y.unit),
)
# Check units with weights.
self.check(
np.histogram2d,
x,
y,
weights=weights,
value_args=(x.value, y.value),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit, y.unit),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogram2d,
x,
y,
[5, inb_y],
value_args=(x.value, y.value, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, x.unit, y.unit),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogram2d,
x.value,
y.value,
bins=[5, inb2_y],
value_args=(x.value, y.value),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, u.one, u.one),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogram2d(x, y, 125 * u.s)
with pytest.raises(TypeError):
np.histogram2d(x.value, y.value, 125 * u.s)
# Bin units need to match units of x, y.
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogram2d(x.value, y.value, [125, 200] * u.s)
@needs_array_function
def test_histogramdd(self):
        # First, replicate the histogram2d tests, but using the histogramdd
        # override.  histogramdd normally takes the sample as a tuple with a
        # given number of dimensions, and returns the histogram as well as a
        # tuple of bin edges.
sample = self.x, self.y
sample_units = self.x.unit, self.y.unit
sample_values = (self.x.value, self.y.value)
weights = self.weights
# Basic tests with X, Y
self.check(
np.histogramdd,
sample,
value_args=(sample_values,),
expected_units=(None, sample_units),
)
# Check units with density.
self.check(
np.histogramdd,
sample,
density=True,
value_args=(sample_values,),
expected_units=(1 / (self.x.unit * self.y.unit), sample_units),
)
# Check units with weights.
self.check(
np.histogramdd,
sample,
weights=weights,
value_args=(sample_values,),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, sample_units),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogramdd,
sample,
[5, inb_y],
value_args=(sample_values, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, sample_units),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogramdd,
sample_values,
bins=[5, inb2_y],
value_args=(sample_values,),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, (u.one, u.one)),
)
# For quantities, it is probably not that likely one would pass
# in the sample as an array, but check that it works anyway.
# This also gives a 3-D check.
xyz = np.random.normal(size=(10, 3)) * u.m
self.check(
np.histogramdd,
xyz,
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Passing it in as a tuple should work just as well; note the
# *last* axis contains the sample dimension.
self.check(
np.histogramdd,
(xyz[:, 0], xyz[:, 1], xyz[:, 2]),
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogramdd(sample, 125 * u.s)
# Sequence of single items should be integer.
with pytest.raises(TypeError):
np.histogramdd(sample, [125, 200] * u.s)
with pytest.raises(TypeError):
np.histogramdd(sample_values, [125, 200] * u.s)
# Units of bins should match.
with pytest.raises(u.UnitsError):
np.histogramdd(sample, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@needs_array_function
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@needs_array_function
def test_corrcoef(self):
        # Do not see how we can use corrcoef with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
def test_sort(self):
self.check(np.sort)
def test_sort_axis(self):
self.check(np.sort, axis=0)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
self.check(np.msort)
@needs_array_function
def test_sort_complex(self):
self.check(np.sort_complex)
def test_partition(self):
self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
    # For these, making the behaviour work means deviating only slightly from
    # the docstring, and by default they fail outright, so we might as well.
def setup_method(self):
self.q = np.arange(3.0) * u.Jy
@needs_array_function
def test_array2string(self):
# The default formatters cannot handle units, so if we do not pass
# a relevant formatter, we are better off just treating it as an
# array (which happens for all subtypes).
out0 = np.array2string(self.q)
expected0 = str(self.q.value)
assert out0 == expected0
# Arguments are interpreted as usual.
out1 = np.array2string(self.q, separator=", ")
expected1 = "[0., 1., 2.]"
assert out1 == expected1
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.q, separator=", ", formatter={"all": str})
expected2 = "[0.0 Jy, 1.0 Jy, 2.0 Jy]"
assert out2 == expected2
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.q, None, None, None, ", ", "", np._NoValue, {"float": str}
)
assert out3 == expected2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.q, separator=", ", formatter={"int": str})
assert out4 == expected1
@needs_array_function
def test_array_repr(self):
out = np.array_repr(self.q)
assert out == "Quantity([0., 1., 2.], unit='Jy')"
q2 = self.q.astype("f4")
out2 = np.array_repr(q2)
assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"
@needs_array_function
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
    # Index/bit functions generally fail for floats, so the usual
    # float quantities are safe, but the integer ones are not.
def setup_method(self):
self.q = np.arange(3) * u.m
self.uint_q = u.Quantity(np.arange(3), "m", dtype="u1")
@needs_array_function
def test_packbits(self):
with pytest.raises(TypeError):
np.packbits(self.q)
with pytest.raises(TypeError):
np.packbits(self.uint_q)
@needs_array_function
def test_unpackbits(self):
with pytest.raises(TypeError):
np.unpackbits(self.q)
with pytest.raises(TypeError):
np.unpackbits(self.uint_q)
@needs_array_function
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.q, 3)
with pytest.raises(TypeError):
np.unravel_index(self.uint_q, 3)
@needs_array_function
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.q,), 3)
with pytest.raises(TypeError):
np.ravel_multi_index((self.uint_q,), 3)
@needs_array_function
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.q)
with pytest.raises(TypeError):
np.ix_(self.uint_q)
class TestDtypeFunctions(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
def test_meshgrid(self):
q1 = np.arange(3.0) * u.m
q2 = np.arange(5.0) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
class TestMemoryFunctions(NoUnitTestSetup):
def test_shares_memory(self):
self.check(np.shares_memory, self.q.value)
def test_may_share_memory(self):
self.check(np.may_share_memory, self.q.value)
class TestSetOpsFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([[0.0, 1.0, -1.0], [3.0, 5.0, 3.0], [0.0, 1.0, -1]]) * u.m
self.q2 = np.array([0.0, 100.0, 150.0, 200.0]) * u.cm
def check(self, function, qs, *args, **kwargs):
unit = kwargs.pop("unit", self.q.unit)
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize(
"kwargs",
(
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True),
),
)
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize(
"kwargs",
(
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1),
),
)
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize("kwargs", (dict(), dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
@needs_array_function
def test_setxor1d(self):
self.check2(np.setxor1d)
@needs_array_function
def test_union1d(self):
self.check2(np.union1d)
result = np.union1d(np.array([0.0, np.nan]), np.arange(3) << u.m)
assert result.unit is u.m
assert_array_equal(result.value, np.array([0.0, 1.0, 2.0, np.nan]))
@needs_array_function
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@needs_array_function
def test_in1d(self):
self.check2(np.in1d, unit=None)
# Check zero is treated as having any unit.
assert np.in1d(np.zeros(1), self.q2)
with pytest.raises(u.UnitsError):
np.in1d(np.ones(1), self.q2)
@needs_array_function
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
# ediff1d works always as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.0) * u.m
out = np.ediff1d(x, to_begin=-12.5 * u.cm, to_end=1 * u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.0) * x.unit
assert_array_equal(out, expected)
class TestDatetimeFunctions(BasicTestSetup):
def test_busday_count(self):
with pytest.raises(TypeError):
np.busday_count(self.q, self.q)
def test_busday_offset(self):
with pytest.raises(TypeError):
np.busday_offset(self.q, self.q)
def test_datetime_as_string(self):
with pytest.raises(TypeError):
np.datetime_as_string(self.q)
def test_is_busday(self):
with pytest.raises(TypeError):
np.is_busday(self.q)
# These functions always worked; ensure they do not regress.
# Note that they are *not* wrapped so no need to check coverage.
@pytest.mark.parametrize("function", [np.fft.fftfreq, np.fft.rfftfreq])
def test_fft_frequencies(function):
out = function(128, d=0.1 * u.s)
expected = function(128, d=0.1) / u.s
assert_array_equal(out, expected)
@needs_array_function
class TestFFT(InvariantUnitTestSetup):
# These are all trivial, just preserve the unit.
def setup_method(self):
# Use real input; gets turned into complex as needed.
self.q = np.arange(128.0).reshape(8, -1) * u.s
def test_fft(self):
self.check(np.fft.fft)
def test_ifft(self):
self.check(np.fft.ifft)
def test_rfft(self):
self.check(np.fft.rfft)
def test_irfft(self):
self.check(np.fft.irfft)
def test_fft2(self):
self.check(np.fft.fft2)
def test_ifft2(self):
self.check(np.fft.ifft2)
def test_rfft2(self):
self.check(np.fft.rfft2)
def test_irfft2(self):
self.check(np.fft.irfft2)
def test_fftn(self):
self.check(np.fft.fftn)
def test_ifftn(self):
self.check(np.fft.ifftn)
def test_rfftn(self):
self.check(np.fft.rfftn)
def test_irfftn(self):
self.check(np.fft.irfftn)
def test_hfft(self):
self.check(np.fft.hfft)
def test_ihfft(self):
self.check(np.fft.ihfft)
def test_fftshift(self):
self.check(np.fft.fftshift)
def test_ifftshift(self):
self.check(np.fft.ifftshift)
class TestLinAlg(metaclass=CoverageMeta):
def setup_method(self):
self.q = (
np.array(
[[ 1.0, -1.0, 2.0],
[ 0.0, 3.0, -1.0],
[-1.0, -1.0, 1.0]]
) << u.m
) # fmt: skip
def test_cond(self):
c = np.linalg.cond(self.q)
expected = np.linalg.cond(self.q.value)
assert c == expected
def test_matrix_rank(self):
r = np.linalg.matrix_rank(self.q)
x = np.linalg.matrix_rank(self.q.value)
assert r == x
@needs_array_function
def test_matrix_rank_with_tol(self):
        # Use a matrix that is not so well-conditioned, so that tol=1 and
        # tol=0.01 would give different ranks, i.e., the cm-to-m conversion of tol matters.
q = np.arange(9.0).reshape(3, 3) / 4 * u.m
tol = 1.0 * u.cm
r2 = np.linalg.matrix_rank(q, tol)
x2 = np.linalg.matrix_rank(q.value, tol.to_value(q.unit))
assert r2 == x2
def test_matrix_power(self):
q1 = np.linalg.matrix_power(self.q, 1)
assert_array_equal(q1, self.q)
q2 = np.linalg.matrix_power(self.q, 2)
assert_array_equal(q2, self.q @ self.q)
q2 = np.linalg.matrix_power(self.q, 4)
assert_array_equal(q2, self.q @ self.q @ self.q @ self.q)
@needs_array_function
def test_matrix_inv_power(self):
qinv = np.linalg.inv(self.q.value) / self.q.unit
qm1 = np.linalg.matrix_power(self.q, -1)
assert_array_equal(qm1, qinv)
qm3 = np.linalg.matrix_power(self.q, -3)
assert_array_equal(qm3, qinv @ qinv @ qinv)
@needs_array_function
def test_multi_dot(self):
q2 = np.linalg.multi_dot([self.q, self.q])
q2x = self.q @ self.q
assert_array_equal(q2, q2x)
q3 = np.linalg.multi_dot([self.q, self.q, self.q])
q3x = self.q @ self.q @ self.q
assert_array_equal(q3, q3x)
@needs_array_function
def test_svd(self):
m = np.arange(10.0) * np.arange(5.0)[:, np.newaxis] * u.m
svd_u, svd_s, svd_vt = np.linalg.svd(m, full_matrices=False)
svd_ux, svd_sx, svd_vtx = np.linalg.svd(m.value, full_matrices=False)
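        # Only the singular values carry the unit; U and Vt are dimensionless.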
svd_sx <<= m.unit
assert_array_equal(svd_u, svd_ux)
assert_array_equal(svd_vt, svd_vtx)
assert_array_equal(svd_s, svd_sx)
assert u.allclose(svd_u @ np.diag(svd_s) @ svd_vt, m)
s2 = np.linalg.svd(m, compute_uv=False)
svd_s2x = np.linalg.svd(m.value, compute_uv=False) << m.unit
assert_array_equal(s2, svd_s2x)
@needs_array_function
def test_inv(self):
inv = np.linalg.inv(self.q)
expected = np.linalg.inv(self.q.value) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_pinv(self):
pinv = np.linalg.pinv(self.q)
expected = np.linalg.pinv(self.q.value) / self.q.unit
assert_array_equal(pinv, expected)
rcond = 0.01 * u.cm
pinv2 = np.linalg.pinv(self.q, rcond)
expected2 = (
np.linalg.pinv(self.q.value, rcond.to_value(self.q.unit)) / self.q.unit
)
assert_array_equal(pinv2, expected2)
@needs_array_function
def test_tensorinv(self):
inv = np.linalg.tensorinv(self.q, ind=1)
expected = np.linalg.tensorinv(self.q.value, ind=1) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_det(self):
det = np.linalg.det(self.q)
expected = np.linalg.det(self.q.value)
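        # The determinant of an n x n matrix carries the unit to the n-th power.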
expected <<= self.q.unit ** self.q.shape[-1]
assert_array_equal(det, expected)
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[0]) # Not 2-D
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[:-1]) # Not square.
@needs_array_function
def test_slogdet(self):
# TODO: Could be supported if we had a natural logarithm unit.
with pytest.raises(TypeError):
logdet = np.linalg.slogdet(self.q)
assert hasattr(logdet, "unit")
@needs_array_function
def test_solve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.solve(self.q, b)
xx = np.linalg.solve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_tensorsolve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.tensorsolve(self.q, b)
xx = np.linalg.tensorsolve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_lstsq(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x, residuals, rank, s = np.linalg.lstsq(self.q, b, rcond=None)
xx, residualsx, rankx, sx = np.linalg.lstsq(self.q.value, b.value, rcond=None)
xx <<= b.unit / self.q.unit
residualsx <<= b.unit**2
sx <<= self.q.unit
assert_array_equal(x, xx)
assert_array_equal(residuals, residualsx)
assert_array_equal(s, sx)
assert rank == rankx
assert u.allclose(self.q @ x, b)
# Also do one where we can check the answer...
m = np.eye(3)
b = np.arange(3) * u.m
x, residuals, rank, s = np.linalg.lstsq(m, b, rcond=1.0 * u.percent)
assert_array_equal(x, b)
assert np.all(residuals == 0 * u.m**2)
assert rank == 3
assert_array_equal(s, np.array([1.0, 1.0, 1.0]) << u.one)
with pytest.raises(u.UnitsError):
np.linalg.lstsq(m, b, rcond=1.0 * u.s)
@needs_array_function
def test_norm(self):
n = np.linalg.norm(self.q)
expected = np.linalg.norm(self.q.value) << self.q.unit
assert_array_equal(n, expected)
# Special case: 1-D, ord=0.
n1 = np.linalg.norm(self.q[0], ord=0)
expected1 = np.linalg.norm(self.q[0].value, ord=0) << u.one
assert_array_equal(n1, expected1)
@needs_array_function
def test_cholesky(self):
# Numbers from np.linalg.cholesky docstring.
q = np.array([[1, -2j], [2j, 5]]) * u.m
cd = np.linalg.cholesky(q)
cdx = np.linalg.cholesky(q.value) << q.unit**0.5
assert_array_equal(cd, cdx)
assert u.allclose(cd @ cd.T.conj(), q)
@needs_array_function
def test_qr(self):
# This is not exhaustive...
a = np.array([[1, -2j], [2j, 5]]) * u.m
q, r = np.linalg.qr(a)
qx, rx = np.linalg.qr(a.value)
qx <<= u.one
rx <<= a.unit
assert_array_equal(q, qx)
assert_array_equal(r, rx)
assert u.allclose(q @ r, a)
@needs_array_function
def test_eig(self):
w, v = np.linalg.eig(self.q)
wx, vx = np.linalg.eig(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w, v = np.linalg.eig(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
assert_array_equal(v, np.eye(3))
@needs_array_function
def test_eigvals(self):
w = np.linalg.eigvals(self.q)
wx = np.linalg.eigvals(self.q.value) << self.q.unit
assert_array_equal(w, wx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w = np.linalg.eigvals(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
@needs_array_function
def test_eigh(self):
w, v = np.linalg.eigh(self.q)
wx, vx = np.linalg.eigh(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
@needs_array_function
def test_eigvalsh(self):
w = np.linalg.eigvalsh(self.q)
wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
assert_array_equal(w, wx)
class TestRecFunctions(metaclass=CoverageMeta):
@classmethod
def setup_class(self):
self.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
self.pv_t_dtype = np.dtype(
[("pv", np.dtype([("pp", "f8"), ("vv", "f8")])), ("t", "f8")]
)
self.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], self.pv_dtype)
self.pv_t = np.array(
[((4.0, 2.5), 0.0), ((5.0, 5.0), 1.0), ((6.0, 7.5), 2.0)], self.pv_t_dtype
)
self.pv_unit = u.StructuredUnit((u.km, u.km / u.s), ("p", "v"))
self.pv_t_unit = u.StructuredUnit((self.pv_unit, u.s), ("pv", "t"))
self.q_pv = self.pv << self.pv_unit
self.q_pv_t = self.pv_t << self.pv_t_unit
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'m'"):
rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))
# it works if all the units are equal
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)
# also if the units are convertible
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 600] * u.eV)
struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
with u.add_enabled_equivalencies(u.mass_energy()):
unstruct = rfn.structured_to_unstructured(struct)
u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)
# and if the dtype is nested
struct = [(5, (400.0, 3e6))] * u.Unit("m, (cm, um)")
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [[5, 4, 3]] * u.m)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
unstruct = [1, 2, 3] * u.m
dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])
# It works.
struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
assert struct.unit == u.Unit("(m, m, m)")
assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)
# Can't structure something that's already structured.
with pytest.raises(ValueError, match="arr must have at least one dimension"):
rfn.unstructured_to_structured(struct, dtype=dtype)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``
def test_merge_arrays_repeat_dtypes(self):
# Cannot merge things with repeat dtypes.
q1 = u.Quantity([(1,)], dtype=[("f1", float)])
q2 = u.Quantity([(1,)], dtype=[("f1", float)])
with pytest.raises(ValueError, match="field 'f1' occurs more than once"):
rfn.merge_arrays((q1, q2))
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays(self, flatten):
"""Test `numpy.lib.recfunctions.merge_arrays`."""
# Merge single normal array.
arr = rfn.merge_arrays(self.q_pv["p"], flatten=flatten)
assert_array_equal(arr["f0"], self.q_pv["p"])
assert arr.unit == (u.km,)
# Merge single structured array.
arr = rfn.merge_arrays(self.q_pv, flatten=flatten)
assert_array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
# Merge 1-element tuple.
arr = rfn.merge_arrays((self.q_pv,), flatten=flatten)
assert np.array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
@pytest.mark.xfail
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_nonquantities(self, flatten):
        # Fails because a Quantity cannot be created from a structured array.
        arr = rfn.merge_arrays((self.q_pv["p"], self.q_pv.value), flatten=flatten)
def test_merge_array_nested_structure(self):
# Merge 2-element tuples without flattening.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t))
assert_array_equal(arr["f0"], self.q_pv)
assert_array_equal(arr["f1"], self.q_pv_t)
assert arr.unit == ((u.km, u.km / u.s), ((u.km, u.km / u.s), u.s))
def test_merge_arrays_flatten_nested_structure(self):
# Merge 2-element tuple, flattening it.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t), flatten=True)
assert_array_equal(arr["p"], self.q_pv["p"])
assert_array_equal(arr["v"], self.q_pv["v"])
assert_array_equal(arr["pp"], self.q_pv_t["pv"]["pp"])
assert_array_equal(arr["vv"], self.q_pv_t["pv"]["vv"])
assert_array_equal(arr["t"], self.q_pv_t["t"])
assert arr.unit == (u.km, u.km / u.s, u.km, u.km / u.s, u.s)
def test_merge_arrays_asrecarray(self):
with pytest.raises(ValueError, match="asrecarray=True is not supported."):
rfn.merge_arrays(self.q_pv, asrecarray=True)
def test_merge_arrays_usemask(self):
with pytest.raises(ValueError, match="usemask=True is not supported."):
rfn.merge_arrays(self.q_pv, usemask=True)
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_str(self, flatten):
with pytest.raises(
TypeError, match="the Quantity implementation cannot handle"
):
rfn.merge_arrays((self.q_pv, np.array(["a", "b", "c"])), flatten=flatten)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
rec_functions = {
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.stack_arrays, rfn.find_duplicates,
rfn.recursive_fill_fields, rfn.require_fields,
} # fmt: skip
untested_functions |= rec_functions
@needs_array_function
def test_testing_completeness():
assert not CoverageMeta.covered.intersection(untested_functions)
assert all_wrapped == (CoverageMeta.covered | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(FUNCTION_HELPERS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
@needs_array_function
def test_all_included(self):
included_in_helpers = (
SUBCLASS_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(FUNCTION_HELPERS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
    # untested_functions is created using all_wrapped_functions
@needs_array_function
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS | TBD_FUNCTIONS == untested_functions
|
db765043df2f0c2f73a12bd806e67c2eca44bae4db301d20e42e7298747184c7 | # The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import concurrent.futures
import dataclasses
import warnings
from collections import namedtuple
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.units import quantity_helper as qh
from astropy.units.quantity_helper.converters import UfuncHelpers
from astropy.units.quantity_helper.helpers import helper_sqrt
from astropy.utils.compat.optional_deps import HAS_SCIPY
testcase = namedtuple("testcase", ["f", "q_in", "q_out"])
testexc = namedtuple("testexc", ["f", "q_in", "exc", "msg"])
testwarn = namedtuple("testwarn", ["f", "q_in", "wfilter"])
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
# careful of the following line, would break on a function returning
# a single tuple (as opposed to tuple of return values)
results = (results,) if not isinstance(results, tuple) else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.0e-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
class TestUfuncHelpers:
# Note that this test should work even if scipy is present, since
# the scipy.special ufuncs are only loaded on demand.
# The test passes independently of whether erfa is already loaded
# (which will be the case for a full test, since coordinates uses it).
def test_coverage(self):
"""Test that we cover all ufunc's"""
all_np_ufuncs = {
ufunc
for ufunc in np.core.umath.__dict__.values()
if isinstance(ufunc, np.ufunc)
}
all_q_ufuncs = qh.UNSUPPORTED_UFUNCS | set(qh.UFUNC_HELPERS.keys())
# Check that every numpy ufunc is covered.
assert all_np_ufuncs - all_q_ufuncs == set()
# Check that all ufuncs we cover come from numpy or erfa.
# (Since coverage for erfa is incomplete, we do not check
# this the other way).
all_erfa_ufuncs = {
ufunc
for ufunc in erfa_ufunc.__dict__.values()
if isinstance(ufunc, np.ufunc)
}
assert all_q_ufuncs - all_np_ufuncs - all_erfa_ufuncs == set()
def test_scipy_registered(self):
# Should be registered as existing even if scipy is not available.
assert "scipy.special" in qh.UFUNC_HELPERS.modules
def test_removal_addition(self):
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = None
assert np.add not in qh.UFUNC_HELPERS
assert np.add in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = qh.UFUNC_HELPERS[np.subtract]
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
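    # Illustrative sketch of the helper contract assumed above (an assumption
    # spelled out for clarity, not asserted by these tests): a registered
    # helper maps the ufunc and the input unit(s) to a list of input
    # converters plus the unit of the result, roughly
    #
    #     converters, result_unit = qh.UFUNC_HELPERS[np.sqrt](np.sqrt, u.m**2)
    #     # result_unit would be u.m; the converters rescale the inputs.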
@pytest.mark.slow
def test_thread_safety(self, fast_thread_switching):
def dummy_ufunc(*args, **kwargs):
return np.sqrt(*args, **kwargs)
def register():
return {dummy_ufunc: helper_sqrt}
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
helpers = UfuncHelpers()
helpers.register_module(
"astropy.units.tests.test_quantity_ufuncs",
["dummy_ufunc"],
register,
)
futures = [
executor.submit(lambda: helpers[dummy_ufunc])
for i in range(workers)
]
values = [future.result() for future in futures]
assert values == [helper_sqrt] * workers
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize(
"tc",
(
testcase(
f=np.sin,
q_in=(30.0 * u.degree,),
q_out=(0.5 * u.dimensionless_unscaled,),
),
testcase(
f=np.sin,
q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
q_out=(np.array([0.0, 1.0 / np.sqrt(2.0), 1.0]) * u.one,),
),
testcase(
f=np.arcsin,
q_in=(np.sin(30.0 * u.degree),),
q_out=(np.radians(30.0) * u.radian,),
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
),
testcase(
f=np.cos,
q_in=(np.pi / 3.0 * u.radian,),
q_out=(0.5 * u.dimensionless_unscaled,),
),
testcase(
f=np.cos,
q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
q_out=(np.array([1.0, 1.0 / np.sqrt(2.0), 0.0]) * u.one,),
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3.0 * u.radian),),
q_out=(np.pi / 3.0 * u.radian,),
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
),
testcase(
f=np.tan,
q_in=(np.pi / 3.0 * u.radian,),
q_out=(np.sqrt(3.0) * u.dimensionless_unscaled,),
),
testcase(
f=np.tan,
q_in=(np.array([0.0, 45.0, 135.0, 180.0]) * u.degree,),
q_out=(np.array([0.0, 1.0, -1.0, 0.0]) * u.dimensionless_unscaled,),
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3.0 * u.radian),),
q_out=(np.pi / 3.0 * u.radian,),
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
q_out=(np.radians(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
),
testcase(
f=np.arctan2,
q_in=(np.array([10.0, 30.0, 70.0, 80.0]) * u.m, 2.0 * u.km),
q_out=(
np.arctan2(np.array([10.0, 30.0, 70.0, 80.0]), 2000.0) * u.radian,
),
),
testcase(
f=np.arctan2,
q_in=((np.array([10.0, 80.0]) * u.m / (2.0 * u.km)).to(u.one), 1.0),
q_out=(np.arctan2(np.array([10.0, 80.0]) / 2000.0, 1.0) * u.radian,),
),
testcase(f=np.deg2rad, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
testcase(f=np.radians, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
testcase(f=np.deg2rad, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
testcase(f=np.radians, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
testcase(f=np.rad2deg, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
testcase(f=np.degrees, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
testcase(f=np.rad2deg, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
testcase(f=np.degrees, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
),
)
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize(
"te",
(
testexc(f=np.deg2rad, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(f=np.radians, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(f=np.rad2deg, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(f=np.degrees, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(
f=np.sin,
q_in=(3.0 * u.m,),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units",
),
testexc(
f=np.arcsin,
q_in=(3.0 * u.m,),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities",
),
testexc(
f=np.cos,
q_in=(3.0 * u.s,),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units",
),
testexc(
f=np.arccos,
q_in=(3.0 * u.s,),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities",
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N,),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units",
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N,),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities",
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.0 * u.s),
exc=u.UnitsError,
msg="compatible dimensions",
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.0),
exc=u.UnitsError,
msg="dimensionless quantities when other arg",
),
),
)
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize(
"tw",
(testwarn(f=np.arcsin, q_in=(27.0 * u.pc / (15 * u.kpc),), wfilter="error"),),
)
def test_testwarns(self, tw):
return test_testwarn(tw)
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4.0 * u.m, 2.0 / u.s) == 8.0 * u.m / u.s
assert np.multiply(4.0 * u.m, 2.0) == 8.0 * u.m
assert np.multiply(4.0, 2.0 / u.s) == 8.0 / u.s
def test_multiply_array(self):
assert np.all(
np.multiply(np.arange(3.0) * u.m, 2.0 / u.s)
== np.arange(0, 6.0, 2.0) * u.m / u.s
)
@pytest.mark.skipif(
not isinstance(getattr(np, "matmul", None), np.ufunc),
reason="np.matmul is not yet a gufunc",
)
def test_matmul(self):
q = np.arange(3.0) * u.m
r = np.matmul(q, q)
assert r == 5.0 * u.m**2
# less trivial case.
q1 = np.eye(3) * u.m
q2 = np.array(
[[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]
) / u.s # fmt: skip
r2 = np.matmul(q1, q2)
assert np.all(r2 == np.matmul(q1.value, q2.value) * q1.unit * q2.unit)
@pytest.mark.parametrize("function", (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4.0 * u.m, 2.0 * u.s) == function(4.0, 2.0) * u.m / u.s
assert function(4.0 * u.m, 2.0) == function(4.0, 2.0) * u.m
assert function(4.0, 2.0 * u.s) == function(4.0, 2.0) / u.s
@pytest.mark.parametrize("function", (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(
function(np.arange(3.0) * u.m, 2.0 * u.s)
== function(np.arange(3.0), 2.0) * u.m / u.s
)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1.0, 2.0, 3.0]) * u.m
divisor = np.array([3.0, 4.0, 5.0]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
assert_allclose(quotient.value, [13.0, 19.0, 23.0])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4.0 * u.m) == 2.0 * u.m**0.5
def test_sqrt_array(self):
assert np.all(
np.sqrt(np.array([1.0, 4.0, 9.0]) * u.m)
== np.array([1.0, 2.0, 3.0]) * u.m**0.5
)
def test_square_scalar(self):
assert np.square(4.0 * u.m) == 16.0 * u.m**2
def test_square_array(self):
assert np.all(
np.square(np.array([1.0, 2.0, 3.0]) * u.m)
== np.array([1.0, 4.0, 9.0]) * u.m**2
)
def test_reciprocal_scalar(self):
assert np.reciprocal(4.0 * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(
np.reciprocal(np.array([1.0, 2.0, 4.0]) * u.m)
== np.array([1.0, 0.5, 0.25]) / u.m
)
def test_heaviside_scalar(self):
assert np.heaviside(0.0 * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert (
np.heaviside(0.0 * u.s, 25 * u.percent) == 0.25 * u.dimensionless_unscaled
)
assert np.heaviside(2.0 * u.J, 0.25) == 1.0 * u.dimensionless_unscaled
def test_heaviside_array(self):
values = np.array([-1.0, 0.0, 0.0, +1.0])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
assert np.all(
np.heaviside(values * u.m, halfway * u.dimensionless_unscaled)
== [0, 0.25, 0.75, +1.0] * u.dimensionless_unscaled
)
@pytest.mark.parametrize("function", (np.cbrt,))
def test_cbrt_scalar(self, function):
assert function(8.0 * u.m**3) == 2.0 * u.m
@pytest.mark.parametrize("function", (np.cbrt,))
def test_cbrt_array(self, function):
# Calculate cbrt on both sides since on Windows the cube root of 64
        # does not exactly equal 4. See #4388.
values = np.array([1.0, 8.0, 64.0])
assert np.all(function(values * u.m**3) == function(values) * u.m)
def test_power_scalar(self):
assert np.power(4.0 * u.m, 2.0) == 16.0 * u.m**2
assert np.power(4.0, 200.0 * u.cm / u.m) == u.Quantity(
16.0, u.dimensionless_unscaled
)
# regression check on #1696
assert np.power(4.0 * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(
np.power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
== np.array([1.0, 8.0, 27.0]) * u.m**3
)
# regression check on #1696
assert np.all(
np.power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
)
def test_float_power_array(self):
assert np.all(
np.float_power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
== np.array([1.0, 8.0, 27.0]) * u.m**3
)
# regression check on #1696
assert np.all(
np.float_power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
)
def test_power_array_array(self):
with pytest.raises(ValueError):
np.power(4.0 * u.m, [2.0, 4.0])
def test_power_array_array2(self):
with pytest.raises(ValueError):
np.power([2.0, 4.0] * u.m, [2.0, 4.0])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2.0, 4.0] * u.m / u.m
powers = [2.0, 4.0]
res = np.power(q, powers)
assert np.all(res.value == q.value**powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2.0, 4.0] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2**2
assert np.all(res3.value == q2.value**2)
assert res3.unit == q2.unit**2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError, match="raise something to a dimensionless"):
np.power(3.0, 4.0 * u.m)
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.0) == 3.0 * u.m
assert np.copysign(3 * u.m, 1.0 * u.s) == 3.0 * u.m
assert np.copysign(3 * u.m, -1.0) == -3.0 * u.m
assert np.copysign(3 * u.m, -1.0 * u.s) == -3.0 * u.m
def test_copysign_array(self):
assert np.all(
np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0)
== -np.array([1.0, 2.0, 3.0]) * u.s
)
assert np.all(
np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0 * u.m)
== -np.array([1.0, 2.0, 3.0]) * u.s
)
assert np.all(
np.copysign(
np.array([1.0, 2.0, 3.0]) * u.s, np.array([-2.0, 2.0, -4.0]) * u.m
)
== np.array([-1.0, 2.0, -3.0]) * u.s
)
q = np.copysign(np.array([1.0, 2.0, 3.0]), -3 * u.m)
assert np.all(q == np.array([-1.0, -2.0, -3.0]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4.0 * u.m, 2) == 16.0 * u.m
def test_ldexp_array(self):
assert np.all(
np.ldexp(np.array([1.0, 2.0, 3.0]) * u.m, [3, 2, 1])
== np.array([8.0, 8.0, 6.0]) * u.m
)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3.0 * u.m, 4.0)
with pytest.raises(TypeError):
np.ldexp(3.0, u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_scalar(self, function):
q = function(3.0 * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_array(self, function):
q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(np.array([1.0 / 3.0, 1.0 / 2.0, 1.0])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])))
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function "
"to dimensionless quantities"
),
):
function(3.0 * u.m / u.s)
def test_modf_scalar(self):
q = np.modf(9.0 * u.m / (600.0 * u.cm))
assert q == (0.5 * u.dimensionless_unscaled, 1.0 * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.0) * u.m / (500.0 * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3.0 * u.m / (6.0 * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
assert all(
(_q0, _q1) == np.frexp(_d)
for _q0, _q1, _d in zip(q[0], q[1], [1.0 / 3.0, 1.0 / 2.0, 1.0])
)
def test_frexp_invalid_units(self):
        # Can't use frexp() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
"Can only apply 'frexp' function to unscaled dimensionless quantities"
),
):
np.frexp(3.0 * u.m / u.s)
# also does not work on quantities that can be made dimensionless
with pytest.raises(
TypeError,
match=(
"Can only apply 'frexp' function to unscaled dimensionless quantities"
),
):
np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
@pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm), 1.0)
assert q.unit == u.dimensionless_unscaled
assert_allclose(
q.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0]), 1.0)
)
@pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function to dimensionless"
" quantities"
),
):
function(1.0 * u.km / u.s, 3.0 * u.m / u.s)
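# In short (added illustration, not part of the original tests): ufuncs such as
# np.exp and np.log only accept quantities that can be made dimensionless, e.g.
#
#     np.log(10.0 * u.m / (1.0 * u.cm))   # fine, equals np.log(1000.0)
#     np.log(10.0 * u.m)                  # raises TypeError
#
# which is what test_exp_array and test_exp_invalid_units above verify.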
class TestInvariantUfuncs:
@pytest.mark.parametrize(
"ufunc",
[
np.absolute,
np.fabs,
np.conj,
np.conjugate,
np.negative,
np.spacing,
np.rint,
np.floor,
np.ceil,
np.positive,
],
)
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(
"ufunc", [np.absolute, np.conjugate, np.negative, np.rint, np.floor, np.ceil]
)
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(
("ufunc", "arbitrary"),
[
(np.add, 0.0),
(np.subtract, 0.0),
(np.hypot, 0.0),
(np.maximum, 0.0),
(np.minimum, 0.0),
(np.nextafter, 0.0),
(np.remainder, np.inf),
(np.mod, np.inf),
(np.fmod, np.inf),
],
)
def test_invariant_twoarg_one_arbitrary(self, ufunc, arbitrary):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i1, arbitrary)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
class TestComparisonUfuncs:
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.0)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(
q_o2 == ufunc((q_i1 / q_i2).to_value(u.dimensionless_unscaled), 2.0)
)
# comparison with 0., inf, nan is OK even for dimensional quantities
# (though ignore numpy runtime warnings for comparisons with nan).
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
for arbitrary_unit_value in (0.0, np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value * np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0.0, np.inf, np.nan]))
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.isinf, np.isnan, np.signbit))
def test_onearg_test_ufuncs(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = ufunc(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = np.sign(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
class TestInplaceUfuncs:
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value / 10.0, out=s)
assert check is s
assert np.all(check.value == np.arcsin(value / 10.0))
assert check.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100.0 * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# can also replace input with first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.0
assert check is s
assert np.all(check.value == value / 2.0)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2.0 * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1.0 * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2.0 / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1.0, 2.0, 3.0]) * u.dimensionless_unscaled
np.add(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert np.all(s.value == np.array([3.0, 6.0, 9.0]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert_allclose(s.value, np.arctan2(1.0, 2.0))
assert s.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.0 * u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.0) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1.0 * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
        The first two tests check that float32 is kept (closes #3976).
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += 20.0 * u.km
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.parametrize("ufunc", (np.equal, np.greater))
def test_comparison_ufuncs_inplace(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
check = np.empty(q_i1.shape, bool)
ufunc(q_i1.value, q_i2.to_value(q_i1.unit), out=check)
result = np.empty(q_i1.shape, bool)
q_o = ufunc(q_i1, q_i2, out=result)
assert q_o is result
assert type(q_o) is np.ndarray
assert q_o.dtype == bool
assert np.all(q_o == check)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.signbit))
def test_onearg_test_ufuncs_inplace(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, bool)
ufunc(q.value, out=check)
result = np.empty(q.shape, bool)
out = ufunc(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign_inplace(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, q.dtype)
np.sign(q.value, out=check)
result = np.empty(q.shape, q.dtype)
out = np.sign(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
def test_ndarray_inplace_op_with_quantity(self):
"""Regression test for gh-13911."""
a = np.arange(3.0)
q = u.Quantity([12.5, 25.0], u.percent)
a[:2] += q # This used to fail
assert_array_equal(a, np.array([0.125, 1.25, 2.0]))
@pytest.mark.skipif(
not hasattr(np.core.umath, "clip"), reason="no clip ufunc available"
)
class TestClip:
"""Test the clip ufunc.
    In numpy, this is hidden behind a function that does not do
    backwards-compatibility checks. We explicitly test the ufunc here.
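    For example (illustrative sketch based on the tests below):
        q = np.arange(-1.0, 10.0) * u.m
        np.core.umath.clip(q, 125 * u.cm, 0.0055 * u.km)   # clipped to [1.25, 5.5] m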
"""
def setup_method(self):
self.clip = np.core.umath.clip
def test_clip_simple(self):
q = np.arange(-1.0, 10.0) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
result = self.clip(q, q_min, q_max)
assert result.unit == q.unit
expected = (
self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
)
assert np.all(result == expected)
def test_clip_unitless_parts(self):
q = np.arange(-1.0, 10.0) * u.m
qlim = 0.0055 * u.km
# one-sided
result1 = self.clip(q, -np.inf, qlim)
expected1 = self.clip(q.value, -np.inf, qlim.to_value(q.unit)) * q.unit
assert np.all(result1 == expected1)
result2 = self.clip(q, qlim, np.inf)
expected2 = self.clip(q.value, qlim.to_value(q.unit), np.inf) * q.unit
assert np.all(result2 == expected2)
# Zero
result3 = self.clip(q, np.zeros(q.shape), qlim)
expected3 = self.clip(q.value, 0, qlim.to_value(q.unit)) * q.unit
assert np.all(result3 == expected3)
# Two unitless parts, array-shaped.
result4 = self.clip(q, np.zeros(q.shape), np.full(q.shape, np.inf))
expected4 = self.clip(q.value, 0, np.inf) * q.unit
assert np.all(result4 == expected4)
def test_clip_dimensionless(self):
q = np.arange(-1.0, 10.0) * u.dimensionless_unscaled
result = self.clip(q, 200 * u.percent, 5.0)
expected = self.clip(q, 2.0, 5.0)
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_ndarray(self):
a = np.arange(-1.0, 10.0)
result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled)
assert isinstance(result, u.Quantity)
expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_quantity_inplace(self):
q = np.arange(-1.0, 10.0) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
expected = (
self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
)
result = self.clip(q, q_min, q_max, out=q)
assert result is q
assert np.all(result == expected)
def test_clip_ndarray_dimensionless_output(self):
a = np.arange(-1.0, 10.0)
q = np.zeros_like(a) * u.m
expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled, out=q)
assert result is q
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_errors(self):
q = np.arange(-1.0, 10.0) * u.m
with pytest.raises(u.UnitsError):
self.clip(q, 0, 1 * u.s)
with pytest.raises(u.UnitsError):
self.clip(q.value, 0, 1 * u.s)
with pytest.raises(u.UnitsError):
self.clip(q, -1, 0.0)
with pytest.raises(u.UnitsError):
self.clip(q, 0.0, 1.0)
class TestUfuncAt:
    """Test the 'at' method for ufuncs (which calculates in place at given indices).
    For Quantities, since calculations are done in place, this makes sense only
    if the result is still a quantity, and if the unit does not have to change.
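    For example (illustrative sketch):
        q = np.arange(10.0) * u.m
        np.add.at(q, [1, 2], 1 * u.km)   # adds 1000 m in place at indices 1 and 2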
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.0) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.0) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.0) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.0) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.0) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.0) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.0 * u.km)
np.add.at(check, i, 1000.0)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.0 * u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1 * u.s)
# but be fine if it does not
s = np.arange(10.0) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.0 * u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.0) * u.m
np.multiply.at(s, i, 2.0)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.0 * u.km)
class TestUfuncReduceReduceatAccumulate:
    """Test the 'reduce', 'reduceat' and 'accumulate' methods for ufuncs.
    For Quantities, these make sense only if the unit does not have to change.
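    For example (illustrative sketch):
        s = np.arange(10.0) * u.m
        np.add.reduce(s)        # 45.0 m, unit unchanged
        np.multiply.reduce(s)   # raises UnitsError, since the unit would change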
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.0) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense,
# as intermediate result is not even a Quantity
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.0) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
class TestUfuncOuter:
    """Test the 'outer' method for ufuncs.
    Just a few spot checks, since it uses the same code as the regular
    ufunc call.
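    For example (illustrative sketch):
        np.multiply.outer(s1, s2)   # unit is s1.unit * s2.unit
        np.add.outer(s1, s2)        # raises UnitsError unless the units are compatible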
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.0) * u.m
s2 = np.arange(2.0) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.0) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
@dataclasses.dataclass
class DuckQuantity1:
data: u.Quantity
@dataclasses.dataclass
class DuckQuantity2(DuckQuantity1):
@property
def unit(self) -> u.UnitBase:
return self.data.unit
@dataclasses.dataclass(eq=False)
class DuckQuantity3(DuckQuantity2):
def __array_ufunc__(self, function, method, *inputs, **kwargs):
inputs = [inp.data if isinstance(inp, type(self)) else inp for inp in inputs]
if "out" in kwargs:
out = kwargs["out"]
else:
out = None
kwargs_copy = {}
for k in kwargs:
kwarg = kwargs[k]
if isinstance(kwarg, type(self)):
kwargs_copy[k] = kwarg.data
elif isinstance(kwarg, (list, tuple)):
kwargs_copy[k] = type(kwarg)(
item.data if isinstance(item, type(self)) else item
for item in kwarg
)
else:
kwargs_copy[k] = kwarg
kwargs = kwargs_copy
for inp in inputs:
if isinstance(inp, np.ndarray):
result = inp.__array_ufunc__(function, method, *inputs, **kwargs)
if result is not NotImplemented:
if out is None:
return type(self)(result)
else:
if function.nout == 1:
return out[0]
else:
return out
return NotImplemented
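# Note (added illustration, not from the original file): unlike DuckQuantity1/2,
# DuckQuantity3 implements ``__array_ufunc__`` by unwrapping ``.data``, delegating
# to the ndarray machinery, and re-wrapping the result, so e.g.
#     np.negative(DuckQuantity3(1 * u.mm))
# is expected to return ``DuckQuantity3(-1 * u.mm)``, whereas the same call on
# DuckQuantity1/2 falls back to plain ``object`` behaviour and raises, as the
# tests below check.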
class TestUfuncReturnsNotImplemented:
@pytest.mark.parametrize("ufunc", (np.negative, np.abs))
class TestUnaryUfuncs:
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity1(1 * u.mm), DuckQuantity2(1 * u.mm)],
)
def test_basic(self, ufunc, duck_quantity):
with pytest.raises(TypeError, match="bad operand type for .*"):
ufunc(duck_quantity)
@pytest.mark.parametrize(
"duck_quantity", [DuckQuantity3(1 * u.mm), DuckQuantity3([1, 2] * u.mm)]
)
@pytest.mark.parametrize("out", [None, "empty"])
def test_full(self, ufunc, duck_quantity, out):
out_expected = out
if out == "empty":
out = type(duck_quantity)(np.empty_like(ufunc(duck_quantity.data)))
out_expected = np.empty_like(ufunc(duck_quantity.data))
result = ufunc(duck_quantity, out=out)
if out is not None:
assert result is out
result_expected = ufunc(duck_quantity.data, out=out_expected)
assert np.all(result.data == result_expected)
@pytest.mark.parametrize("ufunc", (np.add, np.multiply, np.less))
@pytest.mark.parametrize("quantity", (1 * u.m, [1, 2] * u.m))
class TestBinaryUfuncs:
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity1(1 * u.mm), DuckQuantity2(1 * u.mm)],
)
def test_basic(self, ufunc, quantity, duck_quantity):
with pytest.raises(
(TypeError, ValueError),
match=(
r"(Unsupported operand type\(s\) for ufunc .*)|"
r"(unsupported operand type\(s\) for .*)|"
r"(Value not scalar compatible or convertible to an int, float, or complex array)"
),
):
ufunc(quantity, duck_quantity)
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity3(1 * u.mm), DuckQuantity3([1, 2] * u.mm)],
)
@pytest.mark.parametrize("out", [None, "empty"])
def test_full(self, ufunc, quantity, duck_quantity, out):
out_expected = out
if out == "empty":
out = type(duck_quantity)(
np.empty_like(ufunc(quantity, duck_quantity.data))
)
out_expected = np.empty_like(ufunc(quantity, duck_quantity.data))
result = ufunc(quantity, duck_quantity, out=out)
if out is not None:
assert result is out
result_expected = ufunc(quantity, duck_quantity.data, out=out_expected)
assert np.all(result.data == result_expected)
if HAS_SCIPY:
from scipy import special as sps
erf_like_ufuncs = (
sps.erf, sps.erfc, sps.erfcx, sps.erfi,
sps.gamma, sps.gammaln, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.digamma, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10,
) # fmt: skip
if isinstance(sps.erfinv, np.ufunc):
erf_like_ufuncs += (sps.erfinv, sps.erfcinv)
def test_scipy_registration():
"""Check that scipy gets loaded upon first use."""
assert sps.erf not in qh.UFUNC_HELPERS
sps.erf(1.0 * u.percent)
assert sps.erf in qh.UFUNC_HELPERS
if isinstance(sps.erfinv, np.ufunc):
assert sps.erfinv in qh.UFUNC_HELPERS
else:
assert sps.erfinv not in qh.UFUNC_HELPERS
class TestScipySpecialUfuncs:
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize("function", (sps.cbrt,))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize("function", (sps.cbrt,))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize("function", (sps.radian,))
def test_radian(self, function):
q1 = function(180.0 * u.degree, 0.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0.0 * u.degree, 30.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q2.value, (30.0 * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0.0 * u.degree, 0.0 * u.arcmin, 30.0 * u.arcsec)
assert_allclose(q3.value, (30.0 * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3.0 * u.radian, 0.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q4.value, 3.0)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3.0 * u.m, 2.0 * u.s, 1.0 * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e,
) # fmt: skip
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2.0 * u.m / (2.0 * u.m), 3.0 * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_array(self, function):
q = function(
np.ones(3) * u.m / (1.0 * u.m),
np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m),
)
assert q.unit == u.dimensionless_unscaled
assert np.all(
q.value == function(np.ones(3), np.array([1.0 / 3.0, 1.0 / 2.0, 1.0]))
)
# should also work on quantities that can be made dimensionless
q2 = function(
np.ones(3) * u.m / (1.0 * u.m),
np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm),
)
assert q2.unit == u.dimensionless_unscaled
assert_allclose(
q2.value,
function(np.ones(3), np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])),
)
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function to dimensionless"
" quantities"
),
):
function(1.0 * u.kg, 3.0 * u.m / u.s)
|
5d5b302f045430f2df54578b94cd7382381b73453b98e11a25f8ce9d6367ba31 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Function Units and Quantities."""
from abc import ABCMeta, abstractmethod
import numpy as np
from astropy.units import (
Quantity,
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
__all__ = ["FunctionUnitBase", "FunctionQuantity"]
SUPPORTED_UFUNCS = {
getattr(np.core.umath, ufunc)
for ufunc in (
"isfinite",
"isinf",
"isnan",
"sign",
"signbit",
"rint",
"floor",
"ceil",
"trunc",
"_ones_like",
"ones_like",
"positive",
)
if hasattr(np.core.umath, ufunc)
}
# TODO: the following could work if helper changed relative to Quantity:
# - spacing should return dimensionless, not same unit
# - negative should negate unit too,
# - add, subtract, comparisons can work if units added/subtracted
SUPPORTED_FUNCTIONS = {
getattr(np, function)
for function in ("clip", "trace", "mean", "min", "max", "round")
}
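# Illustrative sketch (not part of the original module; ``u`` is short for
# `astropy.units`, and `~astropy.units.Magnitude` is defined elsewhere in astropy):
# only the ufuncs and functions listed above work on function quantities, e.g.
#     np.isfinite(u.Magnitude(5.0))   # fine: isfinite is in SUPPORTED_UFUNCS
#     np.sqrt(u.Magnitude(5.0))       # raises UnitTypeError via __array_ufunc__ below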
# subclassing UnitBase or CompositeUnit was found to be problematic, requiring
# a large number of overrides. Hence, define new class.
class FunctionUnitBase(metaclass=ABCMeta):
"""Abstract base class for function units.
Function units are functions containing a physical unit, such as dB(mW).
Most of the arithmetic operations on function units are defined in this
base class.
While instantiation is defined, this class should not be used directly.
Rather, subclasses should be used that override the abstract properties
`_default_function_unit` and `_quantity_class`, and the abstract methods
`from_physical`, and `to_physical`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the function unit set by the subclass.
"""
# ↓↓↓ the following four need to be set by subclasses
# Make this a property so we can ensure subclasses define it.
@property
@abstractmethod
def _default_function_unit(self):
"""Default function unit corresponding to the function.
This property should be overridden by subclasses, with, e.g.,
        `~astropy.units.MagUnit` returning `~astropy.units.mag`.
"""
# This has to be a property because the function quantity will not be
# known at unit definition time, as it gets defined after.
@property
@abstractmethod
def _quantity_class(self):
"""Function quantity class corresponding to this function unit.
This property should be overridden by subclasses, with, e.g.,
        `~astropy.units.MagUnit` returning `~astropy.units.Magnitude`.
"""
@abstractmethod
def from_physical(self, x):
"""Transformation from value in physical to value in function units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
@abstractmethod
def to_physical(self, x):
"""Transformation from value in function to value in physical units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
# ↑↑↑ the above four need to be set by subclasses
# have priority over arrays, regular units, and regular quantities
__array_priority__ = 30000
def __init__(self, physical_unit=None, function_unit=None):
if physical_unit is None:
physical_unit = dimensionless_unscaled
else:
physical_unit = Unit(physical_unit)
if not isinstance(physical_unit, UnitBase) or physical_unit.is_equivalent(
self._default_function_unit
):
raise UnitConversionError(f"{physical_unit} is not a physical unit.")
if function_unit is None:
function_unit = self._default_function_unit
else:
# any function unit should be equivalent to subclass default
function_unit = Unit(getattr(function_unit, "function_unit", function_unit))
if not function_unit.is_equivalent(self._default_function_unit):
raise UnitConversionError(
f"Cannot initialize '{self.__class__.__name__}' instance with "
f"function unit '{function_unit}', as it is not equivalent to "
f"default function unit '{self._default_function_unit}'."
)
self._physical_unit = physical_unit
self._function_unit = function_unit
def _copy(self, physical_unit=None):
"""Copy oneself, possibly with a different physical unit."""
if physical_unit is None:
physical_unit = self.physical_unit
return self.__class__(physical_unit, self.function_unit)
@property
def physical_unit(self):
return self._physical_unit
@property
def function_unit(self):
return self._function_unit
@property
def equivalencies(self):
"""List of equivalencies between function and physical units.
Uses the `from_physical` and `to_physical` methods.
"""
return [(self, self.physical_unit, self.to_physical, self.from_physical)]
# ↓↓↓ properties/methods required to behave like a unit
def decompose(self, bases=set()):
"""Copy the current unit with the physical unit decomposed.
For details, see `~astropy.units.UnitBase.decompose`.
"""
return self._copy(self.physical_unit.decompose(bases))
@property
def si(self):
"""Copy the current function unit with the physical unit in SI."""
return self._copy(self.physical_unit.si)
@property
def cgs(self):
"""Copy the current function unit with the physical unit in CGS."""
return self._copy(self.physical_unit.cgs)
def _get_physical_type_id(self):
"""Get physical type corresponding to physical unit."""
return self.physical_unit._get_physical_type_id()
@property
def physical_type(self):
"""Return the physical type of the physical unit (e.g., 'length')."""
return self.physical_unit.physical_type
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, string, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to the built-in equivalencies between the
function unit and the physical one, as well as possible global
defaults set by, e.g., `~astropy.units.set_enabled_equivalencies`.
Use `None` to turn off any global equivalencies.
Returns
-------
bool
"""
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies) for u in other)
other_physical_unit = getattr(
other,
"physical_unit",
(
dimensionless_unscaled
if self.function_unit.is_equivalent(other)
else other
),
)
return self.physical_unit.is_equivalent(other_physical_unit, equivalencies)
def to(self, other, value=1.0, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : `~astropy.units.Unit`, `~astropy.units.FunctionUnitBase`, or str
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the specified unit.
If not provided, defaults to 1.0.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
            This list is meant to deal only with equivalencies between different
physical units; the built-in equivalency between the function
unit and the physical one is automatically taken into account.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
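        For example (illustrative; assumes the standard `~astropy.units.MagUnit`
        subclass, with ``u`` short for `astropy.units`):
            u.mag(u.Jy).to(u.Jy, 0.0)           # 1.0, since 0 mag(Jy) is 1 Jy
            u.mag(u.Jy).to(u.mag(u.mJy), 0.0)   # -7.5, since 1 Jy = 1000 mJy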
"""
# conversion to one's own physical unit should be fastest
if other is self.physical_unit:
return self.to_physical(value)
other_function_unit = getattr(other, "function_unit", other)
if self.function_unit.is_equivalent(other_function_unit):
# when other is an equivalent function unit:
# first convert physical units to other's physical units
other_physical_unit = getattr(
other, "physical_unit", dimensionless_unscaled
)
if self.physical_unit != other_physical_unit:
value_other_physical = self.physical_unit.to(
other_physical_unit, self.to_physical(value), equivalencies
)
# make function unit again, in own system
value = self.from_physical(value_other_physical)
# convert possible difference in function unit (e.g., dex->dB)
return self.function_unit.to(other_function_unit, value)
else:
try:
# when other is not a function unit
return self.physical_unit.to(
other, self.to_physical(value), equivalencies
)
except UnitConversionError as e:
if self.function_unit == Unit("mag"):
# One can get to raw magnitudes via math that strips the dimensions off.
# Include extra information in the exception to remind users of this.
msg = "Did you perhaps subtract magnitudes so the unit got lost?"
e.args += (msg,)
raise e
else:
raise
def is_unity(self):
return False
def __eq__(self, other):
return self.physical_unit == getattr(
other, "physical_unit", dimensionless_unscaled
) and self.function_unit == getattr(other, "function_unit", other)
def __ne__(self, other):
return not self.__eq__(other)
def __rlshift__(self, other):
"""Unit conversion operator ``<<``"""
try:
return self._quantity_class(other, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __mul__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit * other
else:
raise UnitsError(
"Cannot multiply a function unit with a physical dimension "
"with any unit."
)
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit / other
else:
raise UnitsError(
"Cannot divide a function unit with a physical dimension "
"by any unit."
)
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(1.0 / other, unit=self)
except Exception:
return NotImplemented
def __rtruediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return other / self.function_unit
else:
raise UnitsError(
"Cannot divide a function unit with a physical dimension "
"into any unit"
)
else:
# Don't know what to do with anything not like a unit.
return NotImplemented
def __pow__(self, power):
if power == 0:
return dimensionless_unscaled
elif power == 1:
return self._copy()
if self.physical_unit == dimensionless_unscaled:
return self.function_unit**power
raise UnitsError(
"Cannot raise a function unit with a physical dimension "
"to any power but 0 or 1."
)
def __pos__(self):
return self._copy()
def to_string(self, format="generic"):
"""
Output the unit in the given format as a string.
The physical unit is appended, within parentheses, to the function
        unit, as in "dB(mW)", with both units set using the given format.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
if format not in ("generic", "unscaled", "latex", "latex_inline"):
raise ValueError(
f"Function units cannot be written in {format} "
"format. Only 'generic', 'unscaled', 'latex' and "
"'latex_inline' are supported."
)
self_str = self.function_unit.to_string(format)
pu_str = self.physical_unit.to_string(format)
if pu_str == "":
pu_str = "1"
if format.startswith("latex"):
# need to strip leading and trailing "$"
self_str += rf"$\mathrm{{\left( {pu_str[1:-1]} \right)}}$"
else:
self_str += f"({pu_str})"
return self_str
def __str__(self):
"""Return string representation for unit."""
self_str = str(self.function_unit)
pu_str = str(self.physical_unit)
if pu_str:
self_str += f"({pu_str})"
return self_str
def __repr__(self):
# By default, try to give a representation using `Unit(<string>)`,
# with string such that parsing it would give the correct FunctionUnit.
if callable(self.function_unit):
return f'Unit("{self.to_string()}")'
else:
return '{}("{}"{})'.format(
self.__class__.__name__,
self.physical_unit,
""
if self.function_unit is self._default_function_unit
else f', unit="{self.function_unit}"',
)
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return self.to_string("latex")
def __hash__(self):
return hash((self.function_unit, self.physical_unit))
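# Illustrative sketch (not part of the original module) of the subclassing
# pattern described in the FunctionUnitBase docstring above; the real
# logarithmic units elsewhere in astropy are more complete.  A dex-like unit
# needs the four abstract members plus a quantity class pointing back at it:
#
#     class ExampleDexUnit(FunctionUnitBase):
#         @property
#         def _default_function_unit(self):
#             return Unit("dex")          # assumes "dex" parses to the usual unit
#
#         @property
#         def _quantity_class(self):
#             return ExampleDex
#
#         def from_physical(self, x):
#             return np.log10(x)
#
#         def to_physical(self, x):
#             return 10.0 ** x
#
#     class ExampleDex(FunctionQuantity):
#         _unit_class = ExampleDexUnit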
class FunctionQuantity(Quantity):
"""A representation of a (scaled) function of a number with a unit.
Function quantities are quantities whose units are functions containing a
physical unit, such as dB(mW). Most of the arithmetic operations on
function quantities are defined in this base class.
While instantiation is also defined here, this class should not be
instantiated directly. Rather, subclasses should be made which have
``_unit_class`` pointing back to the corresponding function unit class.
Parameters
----------
value : number, quantity-like, or sequence thereof
The numerical value of the function quantity. If a number or
a `~astropy.units.Quantity` with a function unit, it will be converted
to ``unit`` and the physical unit will be inferred from ``unit``.
        If a `~astropy.units.Quantity` with just a physical unit, it will be
        converted to the function unit, after, if necessary, converting it to
the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.FunctionUnitBase`, optional
For an `~astropy.units.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. Ignored
if the input does not need to be converted and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be of the
class used. Otherwise, subclasses will be passed through.
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be prepended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`~astropy.units.Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not a `~astropy.units.FunctionUnitBase`
or `~astropy.units.Unit` object, or a parseable string unit.
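    For example (illustrative; uses the `~astropy.units.Magnitude` subclass
    defined elsewhere in astropy, with ``u`` short for `astropy.units`):
        u.Magnitude(100 * u.ct)      # -5 mag(ct)
        u.Magnitude(-2.5).physical   # 10, a dimensionless Quantity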
"""
_unit_class = None
"""Default `~astropy.units.FunctionUnitBase` subclass.
This should be overridden by subclasses.
"""
# Ensure priority over ndarray, regular Unit & Quantity, and FunctionUnit.
__array_priority__ = 40000
# Define functions that work on FunctionQuantity.
_supported_ufuncs = SUPPORTED_UFUNCS
_supported_functions = SUPPORTED_FUNCTIONS
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# Convert possible string input to a (function) unit.
unit = Unit(unit)
if not isinstance(unit, FunctionUnitBase):
# By default, use value's physical unit.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# if iterable, see if first item has a unit
# (mixed lists fail in super call below).
try:
value_unit = getattr(value[0], "unit")
except Exception:
pass
physical_unit = getattr(value_unit, "physical_unit", value_unit)
unit = cls._unit_class(physical_unit, function_unit=unit)
# initialise!
return super().__new__(
cls,
value,
unit,
dtype=dtype,
copy=copy,
order=order,
subok=subok,
ndmin=ndmin,
)
# ↓↓↓ properties not found in Quantity
@property
    def physical(self):
        """The physical quantity corresponding to the function one."""
return self.to(self.unit.physical_unit)
@property
def _function_view(self):
"""View as Quantity with function unit, dropping the physical unit.
Use `~astropy.units.quantity.Quantity.value` for just the value.
"""
return self._new_view(unit=self.unit.function_unit)
# ↓↓↓ methods overridden to change the behavior
@property
def si(self):
"""Return a copy with the physical unit in SI units."""
return self.__class__(self.physical.si)
@property
def cgs(self):
"""Return a copy with the physical unit in CGS units."""
return self.__class__(self.physical.cgs)
def decompose(self, bases=[]):
"""Generate a new instance with the physical unit decomposed.
For details, see `~astropy.units.Quantity.decompose`.
"""
return self.__class__(self.physical.decompose(bases))
# ↓↓↓ methods overridden to add additional behavior
def __quantity_subclass__(self, unit):
if isinstance(unit, FunctionUnitBase):
return self.__class__, True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if not isinstance(unit, self._unit_class):
# Have to take care of, e.g., (10*u.mag).view(u.Magnitude)
try:
# "or 'nonsense'" ensures `None` breaks, just in case.
unit = self._unit_class(function_unit=unit or "nonsense")
except Exception:
raise UnitTypeError(
f"{type(self).__name__} instances require"
f" {self._unit_class.__name__} function units, so cannot set it to"
f" '{unit}'."
)
self._unit = unit
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# TODO: it would be more logical to have this in Quantity already,
# instead of in UFUNC_HELPERS, where it cannot be overridden.
# And really it should just return NotImplemented, since possibly
# another argument might know what to do.
if function not in self._supported_ufuncs:
raise UnitTypeError(
f"Cannot use ufunc '{function.__name__}' with function quantities"
)
return super().__array_ufunc__(function, method, *inputs, **kwargs)
def _maybe_new_view(self, result):
"""View as function quantity if the unit is unchanged.
        Used for the case that self.unit.physical_unit is dimensionless,
        where multiplication and division are done using the Quantity
        equivalent, so that the result can be transformed back to a
        FunctionQuantity if possible.
"""
if isinstance(result, Quantity) and result.unit == self.unit:
return self._new_view(result)
else:
return result
# ↓↓↓ methods overridden to change behavior
def __mul__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view * other)
raise UnitTypeError(
"Cannot multiply function quantities which are not dimensionless "
"with anything."
)
def __truediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view / other)
raise UnitTypeError(
"Cannot divide function quantities which are not dimensionless by anything."
)
def __rtruediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view.__rtruediv__(other))
raise UnitTypeError(
"Cannot divide function quantities which are not dimensionless "
"into anything."
)
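    # Illustrative sketch (supplementary comment, not original code; assumes
    # ``astropy.units`` is imported as ``u``): only function quantities whose
    # physical unit is dimensionless support ``*`` and ``/``, since the
    # operation can then be done on the function view and viewed back, e.g.
    #     u.Magnitude(3.) * 2          # -> 6 mag
    # whereas ``u.Magnitude(3. * u.Jy) * 2`` raises UnitTypeError because the
    # physical unit (Jy) is not dimensionless.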
def _comparison(self, other, comparison_func):
"""Do a comparison between self and other, raising UnitsError when
other cannot be converted to self because it has different physical
unit, and returning NotImplemented when there are other errors."""
try:
# will raise a UnitsError if physical units not equivalent
other_in_own_unit = self._to_own_unit(other, check_precision=False)
except UnitsError as exc:
if self.unit.physical_unit != dimensionless_unscaled:
raise exc
try:
other_in_own_unit = self._function_view._to_own_unit(
other, check_precision=False
)
except Exception:
raise exc
except Exception:
return NotImplemented
return comparison_func(other_in_own_unit)
def __eq__(self, other):
try:
return self._comparison(other, self.value.__eq__)
except UnitsError:
return False
def __ne__(self, other):
try:
return self._comparison(other, self.value.__ne__)
except UnitsError:
return True
def __gt__(self, other):
return self._comparison(other, self.value.__gt__)
def __ge__(self, other):
return self._comparison(other, self.value.__ge__)
def __lt__(self, other):
return self._comparison(other, self.value.__lt__)
def __le__(self, other):
return self._comparison(other, self.value.__le__)
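    # Illustrative note (supplementary comment, not original code): ``==`` and
    # ``!=`` swallow the UnitsError raised for incompatible physical units and
    # return False/True, respectively, while the ordering comparisons above do
    # not catch it.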
def __lshift__(self, other):
"""Unit conversion operator `<<`"""
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
# Ensure Quantity methods are used only if they make sense.
def _wrap_function(self, function, *args, **kwargs):
if function in self._supported_functions:
return super()._wrap_function(function, *args, **kwargs)
# For dimensionless, we can convert to regular quantities.
if all(
arg.unit.physical_unit == dimensionless_unscaled
for arg in (self,) + args
if (hasattr(arg, "unit") and hasattr(arg.unit, "physical_unit"))
):
args = tuple(getattr(arg, "_function_view", arg) for arg in args)
return self._function_view._wrap_function(function, *args, **kwargs)
raise TypeError(
f"Cannot use method that uses function '{function.__name__}' with "
"function quantities that are not dimensionless."
)
# Override functions that are supported but do not use _wrap_function
# in Quantity.
def max(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.max, axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.min, axis, out=out, keepdims=keepdims)
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.sum, axis, dtype, out=out, keepdims=keepdims)
def cumsum(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.cumsum, axis, dtype, out=out)
def clip(self, a_min, a_max, out=None):
return self._wrap_function(
np.clip, self._to_own_unit(a_min), self._to_own_unit(a_max), out=out
)
|
ee136f0a2e1635047737ac42100696a14bef6c9b30d93ac0782845c6b163ce54 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization and other aspects of Angle and subclasses"""
import pickle
import threading
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.coordinates.angles import Angle, Latitude, Longitude
from astropy.coordinates.errors import (
IllegalHourError,
IllegalMinuteError,
IllegalMinuteWarning,
IllegalSecondError,
IllegalSecondWarning,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_create_angles():
"""
Tests creating and accessing Angle objects
"""
""" The "angle" is a fundamental object. The internal
representation is stored in radians, but this is transparent to the user.
    Units *must* be specified rather than assuming a default value. This is
as much for self-documenting code as anything else.
Angle objects simply represent a single angular coordinate. More specific
angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle."""
a1 = Angle(54.12412, unit=u.degree)
a2 = Angle("54.12412", unit=u.degree)
a3 = Angle("54:07:26.832", unit=u.degree)
a4 = Angle("54.12412 deg")
a5 = Angle("54.12412 degrees")
a6 = Angle("54.12412°") # because we like Unicode
a8 = Angle("54°07'26.832\"")
a9 = Angle([54, 7, 26.832], unit=u.degree)
assert_allclose(a9.value, [54, 7, 26.832])
assert a9.unit is u.degree
a10 = Angle(3.60827466667, unit=u.hour)
a11 = Angle("3:36:29.7888000120", unit=u.hour)
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hour"):
a12 = Angle((3, 36, 29.7888000120), unit=u.hour) # *must* be a tuple
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hour"):
# Regression test for #5001
a13 = Angle((3, 36, 29.7888000120), unit="hour")
Angle(0.944644098745, unit=u.radian)
with pytest.raises(u.UnitsError):
Angle(54.12412)
# raises an exception because this is ambiguous
with pytest.raises(u.UnitsError):
Angle(54.12412, unit=u.m)
with pytest.raises(ValueError):
Angle(12.34, unit="not a unit")
a14 = Angle("03h36m29.7888000120") # no trailing 's', but unambiguous
a15 = Angle("5h4m3s") # single digits, no decimal
assert a15.unit == u.hourangle
a16 = Angle("1 d")
a17 = Angle("1 degree")
assert a16.degree == 1
assert a17.degree == 1
a18 = Angle("54 07.4472", unit=u.degree)
a19 = Angle("54:07.4472", unit=u.degree)
a20 = Angle("54d07.4472m", unit=u.degree)
a21 = Angle("3h36m", unit=u.hour)
a22 = Angle("3.6h", unit=u.hour)
a23 = Angle("- 3h", unit=u.hour)
a24 = Angle("+ 3h", unit=u.hour)
a25 = Angle(3.0, unit=u.hour**1)
# ensure the above angles that should match do
assert a1 == a2 == a3 == a4 == a5 == a6 == a8 == a18 == a19 == a20
assert_allclose(a1.radian, a2.radian)
assert_allclose(a2.degree, a3.degree)
assert_allclose(a3.radian, a4.radian)
assert_allclose(a4.radian, a5.radian)
assert_allclose(a5.radian, a6.radian)
assert_allclose(a10.degree, a11.degree)
assert a11 == a12 == a13 == a14
assert a21 == a22
assert a23 == -a24
assert a24 == a25
# check for illegal ranges / values
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.degree)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.degree)
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.hour)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.hour)
with pytest.raises(IllegalHourError):
a = Angle("99 25 51.0", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12 25 51.0xxx", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12h34321m32.2s")
assert a1 is not None
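# Illustrative sketch (supplementary example, not part of the original suite):
# the unit bookkeeping described in the docstring above, in compact form. The
# concrete values are assumptions chosen for the example.
def _example_angle_units_sketch():
    a = Angle("1d2m3.4s")  # a sexagesimal string carries its own unit
    assert a.unit == u.deg
    b = Angle(1.5, unit=u.hourangle)  # numeric input needs an explicit unit
    assert_allclose(b.degree, 22.5)
    with pytest.raises(u.UnitsError):
        Angle(1.5)  # ambiguous without a unit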
def test_angle_from_view():
q = np.arange(3.0) * u.deg
a = q.view(Angle)
assert type(a) is Angle
assert a.unit is q.unit
assert np.all(a == q)
q2 = np.arange(4) * u.m
with pytest.raises(u.UnitTypeError):
q2.view(Angle)
def test_angle_ops():
"""
Tests operations on Angle objects
"""
# Angles can be added and subtracted. Multiplication and division by a
# scalar is also permitted. A negative operator is also valid. All of
# these operate in a single dimension. Attempting to multiply or divide two
    # Angle objects will return a Quantity. An exception will be raised if one
    # attempts to store output with a non-angular unit in an Angle [#2718].
a1 = Angle(3.60827466667, unit=u.hour)
a2 = Angle("54:07:26.832", unit=u.degree)
a1 + a2 # creates new Angle object
a1 - a2
-a1
assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003)
assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10
# commutativity
assert (2 * a1).hour == (a1 * 2).hour
a3 = Angle(a1) # makes a *copy* of the object, but identical content as a1
assert_allclose(a1.radian, a3.radian)
assert a1 is not a3
a4 = abs(-a1)
assert a4.radian == a1.radian
a5 = Angle(5.0, unit=u.hour)
assert a5 > a1
assert a5 >= a1
assert a1 < a5
assert a1 <= a5
# check operations with non-angular result give Quantity.
a6 = Angle(45.0, u.degree)
a7 = a6 * a5
assert type(a7) is u.Quantity
# but those with angular result yield Angle.
# (a9 is regression test for #5327)
a8 = a1 + 1.0 * u.deg
assert type(a8) is Angle
a9 = 1.0 * u.deg + a1
assert type(a9) is Angle
with pytest.raises(TypeError):
a6 *= a5
with pytest.raises(TypeError):
a6 *= u.m
with pytest.raises(TypeError):
np.sin(a6, out=a6)
def test_angle_methods():
# Most methods tested as part of the Quantity tests.
# A few tests here which caused problems before: #8368
a = Angle([0.0, 2.0], "deg")
a_mean = a.mean()
assert type(a_mean) is Angle
assert a_mean == 1.0 * u.degree
a_std = a.std()
assert type(a_std) is Angle
assert a_std == 1.0 * u.degree
a_var = a.var()
assert type(a_var) is u.Quantity
assert a_var == 1.0 * u.degree**2
a_ptp = a.ptp()
assert type(a_ptp) is Angle
assert a_ptp == 2.0 * u.degree
a_max = a.max()
assert type(a_max) is Angle
assert a_max == 2.0 * u.degree
a_min = a.min()
assert type(a_min) is Angle
assert a_min == 0.0 * u.degree
def test_angle_convert():
"""
Test unit conversion of Angle objects
"""
angle = Angle("54.12412", unit=u.degree)
assert_allclose(angle.hour, 3.60827466667)
assert_allclose(angle.radian, 0.944644098745)
assert_allclose(angle.degree, 54.12412)
assert len(angle.hms) == 3
assert isinstance(angle.hms, tuple)
assert angle.hms[0] == 3
assert angle.hms[1] == 36
assert_allclose(angle.hms[2], 29.78879999999947)
# also check that the namedtuple attribute-style access works:
assert angle.hms.h == 3
assert angle.hms.m == 36
assert_allclose(angle.hms.s, 29.78879999999947)
assert len(angle.dms) == 3
assert isinstance(angle.dms, tuple)
assert angle.dms[0] == 54
assert angle.dms[1] == 7
assert_allclose(angle.dms[2], 26.831999999992036)
# also check that the namedtuple attribute-style access works:
assert angle.dms.d == 54
assert angle.dms.m == 7
assert_allclose(angle.dms.s, 26.831999999992036)
assert isinstance(angle.dms[0], float)
assert isinstance(angle.hms[0], float)
# now make sure dms and signed_dms work right for negative angles
negangle = Angle("-54.12412", unit=u.degree)
assert negangle.dms.d == -54
assert negangle.dms.m == -7
assert_allclose(negangle.dms.s, -26.831999999992036)
assert negangle.signed_dms.sign == -1
assert negangle.signed_dms.d == 54
assert negangle.signed_dms.m == 7
assert_allclose(negangle.signed_dms.s, 26.831999999992036)
def test_angle_formatting():
"""
Tests string formatting for Angle objects
"""
"""
The string method of Angle has this signature:
def string(self, unit=DEGREE, decimal=False, sep=" ", precision=5,
pad=False):
The "decimal" parameter defaults to False since if you need to print the
Angle as a decimal, there's no need to use the "format" method (see
above).
"""
angle = Angle("54.12412", unit=u.degree)
# __str__ is the default `format`
assert str(angle) == angle.to_string()
res = "Angle as HMS: 3h36m29.7888s"
assert f"Angle as HMS: {angle.to_string(unit=u.hour)}" == res
res = "Angle as HMS: 3:36:29.7888"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':')}" == res
res = "Angle as HMS: 3:36:29.79"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':', precision=2)}" == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = "Angle as HMS: 3h36m29.7888s"
assert (
"Angle as HMS:"
f" {angle.to_string(unit=u.hour, sep=('h', 'm', 's'), precision=4)}" == res
)
res = "Angle as HMS: 3-36|29.7888"
assert (
f"Angle as HMS: {angle.to_string(unit=u.hour, sep=['-', '|'], precision=4)}"
== res
)
res = "Angle as HMS: 3-36-29.7888"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep='-', precision=4)}" == res
res = "Angle as HMS: 03h36m29.7888s"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, precision=4, pad=True)}" == res
# Same as above, in degrees
angle = Angle("3 36 29.78880", unit=u.degree)
res = "Angle as DMS: 3d36m29.7888s"
assert f"Angle as DMS: {angle.to_string(unit=u.degree)}" == res
res = "Angle as DMS: 3:36:29.7888"
assert f"Angle as DMS: {angle.to_string(unit=u.degree, sep=':')}" == res
res = "Angle as DMS: 3:36:29.79"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, sep=':', precision=2)}" == res
)
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = "Angle as DMS: 3d36m29.7888s"
assert (
f"Angle as DMS: {angle.to_string(unit=u.deg, sep=('d', 'm', 's'), precision=4)}"
== res
)
res = "Angle as DMS: 3-36|29.7888"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, sep=['-', '|'], precision=4)}"
== res
)
res = "Angle as DMS: 3-36-29.7888"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, sep='-', precision=4)}" == res
)
res = "Angle as DMS: 03d36m29.7888s"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, precision=4, pad=True)}" == res
)
res = "Angle as rad: 0.0629763rad"
assert f"Angle as rad: {angle.to_string(unit=u.radian)}" == res
res = "Angle as rad decimal: 0.0629763"
assert (
f"Angle as rad decimal: {angle.to_string(unit=u.radian, decimal=True)}" == res
)
# check negative angles
angle = Angle(-1.23456789, unit=u.degree)
angle2 = Angle(-1.23456789, unit=u.hour)
assert angle.to_string() == "-1d14m04.444404s"
assert angle.to_string(pad=True) == "-01d14m04.444404s"
assert angle.to_string(unit=u.hour) == "-0h04m56.2962936s"
assert angle2.to_string(unit=u.hour, pad=True) == "-01h14m04.444404s"
assert angle.to_string(unit=u.radian, decimal=True) == "-0.0215473"
# We should recognize units that are equal but not identical
assert angle.to_string(unit=u.hour**1) == "-0h04m56.2962936s"
def test_to_string_vector():
# Regression test for the fact that vectorize doesn't work with Numpy 1.6
assert (
Angle([1.0 / 7.0, 1.0 / 7.0], unit="deg").to_string()[0] == "0d08m34.28571429s"
)
assert Angle([1.0 / 7.0], unit="deg").to_string()[0] == "0d08m34.28571429s"
assert Angle(1.0 / 7.0, unit="deg").to_string() == "0d08m34.28571429s"
def test_angle_format_roundtripping():
"""
Ensures that the string representation of an angle can be used to create a
new valid Angle.
"""
a1 = Angle(0, unit=u.radian)
a2 = Angle(10, unit=u.degree)
a3 = Angle(0.543, unit=u.degree)
a4 = Angle("1d2m3.4s")
assert Angle(str(a1)).degree == a1.degree
assert Angle(str(a2)).degree == a2.degree
assert Angle(str(a3)).degree == a3.degree
assert Angle(str(a4)).degree == a4.degree
# also check Longitude/Latitude
ra = Longitude("1h2m3.4s")
dec = Latitude("1d2m3.4s")
assert_allclose(Angle(str(ra)).degree, ra.degree)
assert_allclose(Angle(str(dec)).degree, dec.degree)
def test_radec():
"""
Tests creation/operations of Longitude and Latitude objects
"""
"""
Longitude and Latitude are objects that are subclassed from Angle. As with Angle, Longitude
and Latitude can parse any unambiguous format (tuples, formatted strings, etc.).
The intention is not to create an Angle subclass for every possible
coordinate object (e.g. galactic l, galactic b). However, equatorial Longitude/Latitude
    are so prevalent in astronomy that it's worth creating classes for them.
    They will be noted as "special" in the docs, and the plain Angle class is
    to be used for other coordinate systems.
"""
with pytest.raises(u.UnitsError):
ra = Longitude("4:08:15.162342") # error - hours or degrees?
with pytest.raises(u.UnitsError):
ra = Longitude("-4:08:15.162342")
# the "smart" initializer allows >24 to automatically do degrees, but the
# Angle-based one does not
# TODO: adjust in 0.3 for whatever behavior is decided on
# ra = Longitude("26:34:15.345634") # unambiguous b/c hours don't go past 24
# assert_allclose(ra.degree, 26.570929342)
with pytest.raises(u.UnitsError):
ra = Longitude("26:34:15.345634")
# ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(12)
with pytest.raises(ValueError):
ra = Longitude("garbage containing a d and no units")
ra = Longitude("12h43m23s")
assert_allclose(ra.hour, 12.7230555556)
# TODO: again, fix based on >24 behavior
# ra = Longitude((56,14,52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((56, 14, 52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((12, 14, 52)) # ambiguous w/o units
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hours"):
ra = Longitude((12, 14, 52), unit=u.hour)
# Units can be specified
ra = Longitude("4:08:15.162342", unit=u.hour)
# TODO: this was the "smart" initializer behavior - adjust in 0.3 appropriately
    # Whereas Longitude values are commonly found in hours or degrees, declination
    # is nearly always specified in degrees, so that is the default here.
# dec = Latitude("-41:08:15.162342")
with pytest.raises(u.UnitsError):
dec = Latitude("-41:08:15.162342")
dec = Latitude("-41:08:15.162342", unit=u.degree) # same as above
def test_negative_zero_dms():
# Test for DMS parser
a = Angle("-00:00:10", u.deg)
assert_allclose(a.degree, -10.0 / 3600.0)
# Unicode minus
a = Angle("−00:00:10", u.deg)
assert_allclose(a.degree, -10.0 / 3600.0)
def test_negative_zero_dm():
# Test for DM parser
a = Angle("-00:10", u.deg)
assert_allclose(a.degree, -10.0 / 60.0)
def test_negative_zero_hms():
# Test for HMS parser
a = Angle("-00:00:10", u.hour)
assert_allclose(a.hour, -10.0 / 3600.0)
def test_negative_zero_hm():
# Test for HM parser
a = Angle("-00:10", u.hour)
assert_allclose(a.hour, -10.0 / 60.0)
def test_negative_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle("-00:60", u.hour)
assert_allclose(a.hour, -1.0)
def test_plus_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle("00:60", u.hour)
assert_allclose(a.hour, 1.0)
def test_negative_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("-00:59:60", u.deg)
assert_allclose(a.degree, -1.0)
def test_plus_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("+00:59:60", u.deg)
assert_allclose(a.degree, 1.0)
def test_negative_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("-00:00:60", u.deg)
assert_allclose(a.degree, -1.0 / 60.0)
def test_plus_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("+00:00:60", u.deg)
assert_allclose(a.degree, 1.0 / 60.0)
def test_angle_to_is_angle():
with pytest.warns(IllegalSecondWarning):
a = Angle("00:00:60", u.deg)
assert isinstance(a, Angle)
assert isinstance(a.to(u.rad), Angle)
def test_angle_to_quantity():
with pytest.warns(IllegalSecondWarning):
a = Angle("00:00:60", u.deg)
q = u.Quantity(a)
assert isinstance(q, u.Quantity)
assert q.unit is u.deg
def test_quantity_to_angle():
a = Angle(1.0 * u.deg)
assert isinstance(a, Angle)
with pytest.raises(u.UnitsError):
Angle(1.0 * u.meter)
a = Angle(1.0 * u.hour)
assert isinstance(a, Angle)
assert a.unit is u.hourangle
with pytest.raises(u.UnitsError):
Angle(1.0 * u.min)
def test_angle_string():
with pytest.warns(IllegalSecondWarning):
a = Angle("00:00:60", u.deg)
assert str(a) == "0d01m00s"
a = Angle("00:00:59S", u.deg)
assert str(a) == "-0d00m59s"
a = Angle("00:00:59N", u.deg)
assert str(a) == "0d00m59s"
a = Angle("00:00:59E", u.deg)
assert str(a) == "0d00m59s"
a = Angle("00:00:59W", u.deg)
assert str(a) == "-0d00m59s"
a = Angle("-00:00:10", u.hour)
assert str(a) == "-0h00m10s"
a = Angle("00:00:59E", u.hour)
assert str(a) == "0h00m59s"
a = Angle("00:00:59W", u.hour)
assert str(a) == "-0h00m59s"
a = Angle(3.2, u.radian)
assert str(a) == "3.2rad"
a = Angle(4.2, u.microarcsecond)
assert str(a) == "4.2uarcsec"
a = Angle("1.0uarcsec")
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecN")
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecS")
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecE")
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecW")
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle("3d")
assert_allclose(a.value, 3.0)
assert a.unit == u.degree
a = Angle("3dN")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dS")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle("3dE")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dW")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle('10"')
assert_allclose(a.value, 10.0)
assert a.unit == u.arcsecond
a = Angle("10'N")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'S")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle("10'E")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'W")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle("45°55′12″N")
assert str(a) == "45d55m12s"
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle("45°55′12″S")
assert str(a) == "-45d55m12s"
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
a = Angle("45°55′12″E")
assert str(a) == "45d55m12s"
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle("45°55′12″W")
assert str(a) == "-45d55m12s"
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
with pytest.raises(ValueError):
Angle("00h00m10sN")
with pytest.raises(ValueError):
Angle("45°55′12″NS")
def test_angle_repr():
assert "Angle" in repr(Angle(0, u.deg))
assert "Longitude" in repr(Longitude(0, u.deg))
assert "Latitude" in repr(Latitude(0, u.deg))
a = Angle(0, u.deg)
repr(a)
def test_large_angle_representation():
"""Test that angles above 360 degrees can be output as strings,
in repr, str, and to_string. (regression test for #1413)"""
a = Angle(350, u.deg) + Angle(350, u.deg)
a.to_string()
a.to_string(u.hourangle)
repr(a)
repr(a.to(u.hourangle))
str(a)
str(a.to(u.hourangle))
def test_wrap_at_inplace():
a = Angle([-20, 150, 350, 360] * u.deg)
out = a.wrap_at("180d", inplace=True)
assert out is None
assert np.all(a.degree == np.array([-20.0, 150.0, -10.0, 0.0]))
def test_latitude():
with pytest.raises(ValueError):
lat = Latitude(["91d", "89d"])
with pytest.raises(ValueError):
lat = Latitude("-91d")
lat = Latitude(["90d", "89d"])
# check that one can get items
assert lat[0] == 90 * u.deg
assert lat[1] == 89 * u.deg
# and that comparison with angles works
assert np.all(lat == Angle(["90d", "89d"]))
# check setitem works
lat[1] = 45.0 * u.deg
assert np.all(lat == Angle(["90d", "45d"]))
# but not with values out of range
with pytest.raises(ValueError):
lat[0] = 90.001 * u.deg
with pytest.raises(ValueError):
lat[0] = -90.001 * u.deg
# these should also not destroy input (#1851)
assert np.all(lat == Angle(["90d", "45d"]))
# conserve type on unit change (closes #1423)
angle = lat.to("radian")
assert type(angle) is Latitude
# but not on calculations
angle = lat - 190 * u.deg
assert type(angle) is Angle
assert angle[0] == -100 * u.deg
lat = Latitude("80d")
angle = lat / 2.0
assert type(angle) is Angle
assert angle == 40 * u.deg
angle = lat * 2.0
assert type(angle) is Angle
assert angle == 160 * u.deg
angle = -lat
assert type(angle) is Angle
assert angle == -80 * u.deg
# Test errors when trying to interoperate with longitudes.
with pytest.raises(
TypeError, match="A Latitude angle cannot be created from a Longitude angle"
):
lon = Longitude(10, "deg")
lat = Latitude(lon)
with pytest.raises(
TypeError, match="A Longitude angle cannot be assigned to a Latitude angle"
):
lon = Longitude(10, "deg")
lat = Latitude([20], "deg")
lat[0] = lon
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lon = Longitude(10, "deg")
lat = Latitude(Angle(lon))
assert lat.value == 10.0
# Check setitem.
lon = Longitude(10, "deg")
lat = Latitude([20], "deg")
lat[0] = Angle(lon)
assert lat.value[0] == 10.0
def test_longitude():
# Default wrapping at 360d with an array input
lon = Longitude(["370d", "88d"])
assert np.all(lon == Longitude(["10d", "88d"]))
assert np.all(lon == Angle(["10d", "88d"]))
# conserve type on unit change and keep wrap_angle (closes #1423)
angle = lon.to("hourangle")
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[0]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[1:]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
# but not on calculations
angle = lon / 2.0
assert np.all(angle == Angle(["5d", "44d"]))
assert type(angle) is Angle
assert not hasattr(angle, "wrap_angle")
angle = lon * 2.0 + 400 * u.deg
assert np.all(angle == Angle(["420d", "576d"]))
assert type(angle) is Angle
# Test setting a mutable value and having it wrap
lon[1] = -10 * u.deg
assert np.all(lon == Angle(["10d", "350d"]))
# Test wrapping and try hitting some edge cases
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
assert np.all(lon.degree == np.array([0.0, 90, 180, 270, 0]))
lon = Longitude(
np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian, wrap_angle="180d"
)
assert np.all(lon.degree == np.array([0.0, 90, -180, -90, 0]))
# Wrap on setting wrap_angle property (also test auto-conversion of wrap_angle to an Angle)
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
lon.wrap_angle = "180d"
assert np.all(lon.degree == np.array([0.0, 90, -180, -90, 0]))
lon = Longitude("460d")
assert lon == Angle("100d")
lon.wrap_angle = "90d"
assert lon == Angle("-260d")
# check that if we initialize a longitude with another longitude,
# wrap_angle is kept by default
lon2 = Longitude(lon)
assert lon2.wrap_angle == lon.wrap_angle
# but not if we explicitly set it
lon3 = Longitude(lon, wrap_angle="180d")
assert lon3.wrap_angle == 180 * u.deg
# check that wrap_angle is always an Angle
lon = Longitude(lon, wrap_angle=Longitude(180 * u.deg))
assert lon.wrap_angle == 180 * u.deg
assert lon.wrap_angle.__class__ is Angle
# check that wrap_angle is not copied
wrap_angle = 180 * u.deg
lon = Longitude(lon, wrap_angle=wrap_angle)
assert lon.wrap_angle == 180 * u.deg
assert np.may_share_memory(lon.wrap_angle, wrap_angle)
# check for problem reported in #2037 about Longitude initializing to -0
lon = Longitude(0, u.deg)
lonstr = lon.to_string()
assert not lonstr.startswith("-")
# also make sure dtype is correctly conserved
assert Longitude(0, u.deg, dtype=float).dtype == np.dtype(float)
assert Longitude(0, u.deg, dtype=int).dtype == np.dtype(int)
# Test errors when trying to interoperate with latitudes.
with pytest.raises(
TypeError, match="A Longitude angle cannot be created from a Latitude angle"
):
lat = Latitude(10, "deg")
lon = Longitude(lat)
with pytest.raises(
TypeError, match="A Latitude angle cannot be assigned to a Longitude angle"
):
lat = Latitude(10, "deg")
lon = Longitude([20], "deg")
lon[0] = lat
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lat = Latitude(10, "deg")
lon = Longitude(Angle(lat))
assert lon.value == 10.0
# Check setitem.
lat = Latitude(10, "deg")
lon = Longitude([20], "deg")
lon[0] = Angle(lat)
assert lon.value[0] == 10.0
def test_wrap_at():
a = Angle([-20, 150, 350, 360] * u.deg)
assert np.all(a.wrap_at(360 * u.deg).degree == np.array([340.0, 150.0, 350.0, 0.0]))
assert np.all(
a.wrap_at(Angle(360, unit=u.deg)).degree == np.array([340.0, 150.0, 350.0, 0.0])
)
assert np.all(a.wrap_at("360d").degree == np.array([340.0, 150.0, 350.0, 0.0]))
assert np.all(a.wrap_at("180d").degree == np.array([-20.0, 150.0, -10.0, 0.0]))
assert np.all(
a.wrap_at(np.pi * u.rad).degree == np.array([-20.0, 150.0, -10.0, 0.0])
)
# Test wrapping a scalar Angle
a = Angle("190d")
assert a.wrap_at("180d") == Angle("-170d")
a = Angle(np.arange(-1000.0, 1000.0, 0.125), unit=u.deg)
for wrap_angle in (270, 0.2, 0.0, 360.0, 500, -2000.125):
aw = a.wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
aw = a.to(u.rad).wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
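# Illustrative sketch (supplementary example, not part of the original suite):
# wrap_at maps angles into the half-open interval
# [wrap_angle - 360 deg, wrap_angle), as exercised in the loop above; the
# values below are assumptions chosen for the example.
def _example_wrap_at_interval_sketch():
    a = Angle(370 * u.deg)
    assert a.wrap_at(360 * u.deg) == Angle(10 * u.deg)
    assert a.wrap_at(180 * u.deg) == Angle(10 * u.deg)
    assert Angle(-190 * u.deg).wrap_at(180 * u.deg) == Angle(170 * u.deg)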
def test_is_within_bounds():
a = Angle([-20, 150, 350] * u.deg)
assert a.is_within_bounds("0d", "360d") is False
assert a.is_within_bounds(None, "360d") is True
assert a.is_within_bounds(-30 * u.deg, None) is True
a = Angle("-20d")
assert a.is_within_bounds("0d", "360d") is False
assert a.is_within_bounds(None, "360d") is True
assert a.is_within_bounds(-30 * u.deg, None) is True
def test_angle_mismatched_unit():
a = Angle("+6h7m8s", unit=u.degree)
assert_allclose(a.value, 91.78333333333332)
def test_regression_formatting_negative():
# Regression test for a bug that caused:
#
# >>> Angle(-1., unit='deg').to_string()
# '-1d00m-0s'
assert Angle(-0.0, unit="deg").to_string() == "-0d00m00s"
assert Angle(-1.0, unit="deg").to_string() == "-1d00m00s"
assert Angle(-0.0, unit="hour").to_string() == "-0h00m00s"
assert Angle(-1.0, unit="hour").to_string() == "-1h00m00s"
def test_regression_formatting_default_precision():
# Regression test for issue #11140
assert Angle("10:20:30.12345678d").to_string() == "10d20m30.12345678s"
assert Angle("10d20m30.123456784564s").to_string() == "10d20m30.12345678s"
assert Angle("10d20m30.123s").to_string() == "10d20m30.123s"
def test_empty_sep():
a = Angle("05h04m31.93830s")
assert a.to_string(sep="", precision=2, pad=True) == "050431.94"
def test_create_tuple():
"""
Tests creation of an angle with an (h,m,s) tuple
(d, m, s) tuples are not tested because of sign ambiguity issues (#13162)
"""
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hours"):
a1 = Angle((1, 30, 0), unit=u.hourangle)
assert a1.value == 1.5
def test_list_of_quantities():
a1 = Angle([1 * u.deg, 1 * u.hourangle])
assert a1.unit == u.deg
assert_allclose(a1.value, [1, 15])
a2 = Angle([1 * u.hourangle, 1 * u.deg], u.deg)
assert a2.unit == u.deg
assert_allclose(a2.value, [15, 1])
def test_multiply_divide():
# Issue #2273
a1 = Angle([1, 2, 3], u.deg)
a2 = Angle([4, 5, 6], u.deg)
a3 = a1 * a2
assert_allclose(a3.value, [4, 10, 18])
assert a3.unit == (u.deg * u.deg)
a3 = a1 / a2
assert_allclose(a3.value, [0.25, 0.4, 0.5])
assert a3.unit == u.dimensionless_unscaled
def test_mixed_string_and_quantity():
a1 = Angle(["1d", 1.0 * u.deg])
assert_array_equal(a1.value, [1.0, 1.0])
assert a1.unit == u.deg
a2 = Angle(["1d", 1 * u.rad * np.pi, "3d"])
assert_array_equal(a2.value, [1.0, 180.0, 3.0])
assert a2.unit == u.deg
def test_array_angle_tostring():
aobj = Angle([1, 2], u.deg)
assert aobj.to_string().dtype.kind == "U"
assert np.all(aobj.to_string() == ["1d00m00s", "2d00m00s"])
def test_wrap_at_without_new():
"""
    Regression test for subtle bugs from situations where an Angle is
    created via numpy channels that don't do the standard __new__ but instead
    depend on __array_finalize__ to set state. Longitude is used because the
    bug was in its _wrap_angle not getting initialized correctly.
"""
l1 = Longitude([1] * u.deg)
l2 = Longitude([2] * u.deg)
l = np.concatenate([l1, l2])
assert l._wrap_angle is not None
def test__str__():
"""
Check the __str__ method used in printing the Angle
"""
# scalar angle
scangle = Angle("10.2345d")
strscangle = scangle.__str__()
assert strscangle == "10d14m04.2s"
# non-scalar array angles
arrangle = Angle(["10.2345d", "-20d"])
strarrangle = arrangle.__str__()
assert strarrangle == "[10d14m04.2s -20d00m00s]"
# summarizing for large arrays, ... should appear
bigarrangle = Angle(np.ones(10000), u.deg)
assert "..." in bigarrangle.__str__()
def test_repr_latex():
"""
Check the _repr_latex_ method, used primarily by IPython notebooks
"""
# try with both scalar
scangle = Angle(2.1, u.deg)
rlscangle = scangle._repr_latex_()
# and array angles
arrangle = Angle([1, 2.1], u.deg)
rlarrangle = arrangle._repr_latex_()
assert rlscangle == r"$2^\circ06{}^\prime00{}^{\prime\prime}$"
assert rlscangle.split("$")[1] in rlarrangle
# make sure the ... appears for large arrays
bigarrangle = Angle(np.ones(50000) / 50000.0, u.deg)
assert "..." in bigarrangle._repr_latex_()
def test_angle_with_cds_units_enabled():
"""Regression test for #5350
Especially the example in
https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
"""
# the problem is with the parser, so remove it temporarily
from astropy.coordinates.angle_formats import _AngleParser
from astropy.units import cds
del _AngleParser._thread_local._parser
with cds.enable():
Angle("5d")
del _AngleParser._thread_local._parser
Angle("5d")
def test_longitude_nan():
# Check that passing a NaN to Longitude doesn't raise a warning
Longitude([0, np.nan, 1] * u.deg)
def test_latitude_nan():
# Check that passing a NaN to Latitude doesn't raise a warning
Latitude([0, np.nan, 1] * u.deg)
def test_angle_wrap_at_nan():
# Check that no attempt is made to wrap a NaN angle
angle = Angle([0, np.nan, 1] * u.deg)
angle.flags.writeable = False # to force an error if a write is attempted
angle.wrap_at(180 * u.deg, inplace=True)
def test_angle_multithreading():
"""
Regression test for issue #7168
"""
angles = ["00:00:00"] * 10000
def parse_test(i=0):
Angle(angles, unit="hour")
for i in range(10):
threading.Thread(target=parse_test, args=(i,)).start()
@pytest.mark.parametrize("cls", [Angle, Longitude, Latitude])
@pytest.mark.parametrize(
"input, expstr, exprepr",
[
(np.nan * u.deg, "nan", "nan deg"),
([np.nan, 5, 0] * u.deg, "[nan 5d00m00s 0d00m00s]", "[nan, 5., 0.] deg"),
([6, np.nan, 0] * u.deg, "[6d00m00s nan 0d00m00s]", "[6., nan, 0.] deg"),
([np.nan, np.nan, np.nan] * u.deg, "[nan nan nan]", "[nan, nan, nan] deg"),
(np.nan * u.hour, "nan", "nan hourangle"),
([np.nan, 5, 0] * u.hour, "[nan 5h00m00s 0h00m00s]", "[nan, 5., 0.] hourangle"),
([6, np.nan, 0] * u.hour, "[6h00m00s nan 0h00m00s]", "[6., nan, 0.] hourangle"),
(
[np.nan, np.nan, np.nan] * u.hour,
"[nan nan nan]",
"[nan, nan, nan] hourangle",
),
(np.nan * u.rad, "nan", "nan rad"),
([np.nan, 1, 0] * u.rad, "[nan 1rad 0rad]", "[nan, 1., 0.] rad"),
([1.50, np.nan, 0] * u.rad, "[1.5rad nan 0rad]", "[1.5, nan, 0.] rad"),
([np.nan, np.nan, np.nan] * u.rad, "[nan nan nan]", "[nan, nan, nan] rad"),
],
)
def test_str_repr_angles_nan(cls, input, expstr, exprepr):
"""
Regression test for issue #11473
"""
q = cls(input)
assert str(q) == expstr
# Deleting whitespaces since repr appears to be adding them for some values
# making the test fail.
assert repr(q).replace(" ", "") == f"<{cls.__name__}{exprepr}>".replace(" ", "")
@pytest.mark.parametrize("sign", (-1, 1))
@pytest.mark.parametrize(
"value,expected_value,dtype,expected_dtype",
[
(np.pi / 2, np.pi / 2, None, np.float64),
(np.pi / 2, np.pi / 2, np.float64, np.float64),
(np.float32(np.pi / 2), np.float32(np.pi / 2), None, np.float32),
(np.float32(np.pi / 2), np.float32(np.pi / 2), np.float32, np.float32),
# these cases would require coercing the float32 value to the float64 value
# making validate have side effects, so it's not implemented for now
# (np.float32(np.pi / 2), np.pi / 2, np.float64, np.float64),
# (np.float32(-np.pi / 2), -np.pi / 2, np.float64, np.float64),
],
)
def test_latitude_limits(value, expected_value, dtype, expected_dtype, sign):
"""
Test that the validation of the Latitude value range in radians works
in both float32 and float64.
As discussed in issue #13708, before, the float32 representation of pi/2
was rejected as invalid because the comparison always used the float64
representation.
"""
# this prevents upcasting to float64 as sign * value would do
if sign < 0:
value = -value
expected_value = -expected_value
result = Latitude(value, u.rad, dtype=dtype)
assert result.value == expected_value
assert result.dtype == expected_dtype
assert result.unit == u.rad
@pytest.mark.parametrize(
"value,dtype",
[
(0.50001 * np.pi, np.float32),
(np.float32(0.50001 * np.pi), np.float32),
(0.50001 * np.pi, np.float64),
],
)
def test_latitude_out_of_limits(value, dtype):
"""
Test that values slightly larger than pi/2 are rejected for different dtypes.
Test cases for issue #13708
"""
with pytest.raises(ValueError, match=r"Latitude angle\(s\) must be within.*"):
Latitude(value, u.rad, dtype=dtype)
def test_angle_pickle_to_string():
"""
Ensure that after pickling we can still do to_string on hourangle.
Regression test for gh-13923.
"""
angle = Angle(0.25 * u.hourangle)
expected = angle.to_string()
via_pickle = pickle.loads(pickle.dumps(angle))
via_pickle_string = via_pickle.to_string() # This used to fail.
assert via_pickle_string == expected
|
ee237cfa3d27e6758f757cced4a8a12fd5882e355b9555aaf24f5cb0381bb323 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for coordinates-related bugs that don't have an obvious other
place to live
"""
import copy
import io
from contextlib import nullcontext
import numpy as np
import pytest
from erfa import ErfaWarning
from astropy import units as u
from astropy.coordinates import (
CIRS,
FK4,
GCRS,
HCRS,
ICRS,
ITRS,
AltAz,
BaseCoordinateFrame,
CartesianDifferential,
CartesianRepresentation,
CylindricalDifferential,
CylindricalRepresentation,
EarthLocation,
FK4NoETerms,
FunctionTransform,
GeocentricMeanEcliptic,
Latitude,
Longitude,
QuantityAttribute,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
get_body,
get_moon,
get_sun,
)
from astropy.coordinates.sites import get_builtin_sites
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils import iers
from astropy.utils.compat.optional_deps import HAS_SCIPY
def test_regression_5085():
"""
PR #5085 was put in place to fix the following issue.
Issue: https://github.com/astropy/astropy/issues/5069
At root was the transformation of Ecliptic coordinates with
non-scalar times.
"""
# Note: for regression test, we need to be sure that we use UTC for the
# epoch, even though more properly that should be TT; but the "expected"
# values were calculated using that.
j2000 = Time("J2000", scale="utc")
times = Time(["2015-08-28 03:30", "2015-09-05 10:30", "2015-09-15 18:35"])
latitudes = Latitude([3.9807075, -5.00733806, 1.69539491] * u.deg)
longitudes = Longitude([311.79678613, 72.86626741, 199.58698226] * u.deg)
distances = u.Quantity([0.00243266, 0.0025424, 0.00271296] * u.au)
coo = GeocentricMeanEcliptic(
lat=latitudes, lon=longitudes, distance=distances, obstime=times, equinox=times
)
# expected result
ras = Longitude([310.50095400, 314.67109920, 319.56507428] * u.deg)
decs = Latitude([-18.25190443, -17.1556676, -15.71616522] * u.deg)
distances = u.Quantity([1.78309901, 1.710874, 1.61326649] * u.au)
expected_result = GCRS(
ra=ras, dec=decs, distance=distances, obstime=j2000
).cartesian.xyz
actual_result = coo.transform_to(GCRS(obstime=j2000)).cartesian.xyz
assert_quantity_allclose(expected_result, actual_result)
def test_regression_3920():
"""
Issue: https://github.com/astropy/astropy/issues/3920
"""
loc = EarthLocation.from_geodetic(0 * u.deg, 0 * u.deg, 0)
time = Time("2010-1-1")
aa = AltAz(location=loc, obstime=time)
sc = SkyCoord(10 * u.deg, 3 * u.deg)
assert sc.transform_to(aa).shape == tuple()
# That part makes sense: the input is a scalar so the output is too
sc2 = SkyCoord(10 * u.deg, 3 * u.deg, 1 * u.AU)
assert sc2.transform_to(aa).shape == tuple()
# in 3920 that assert fails, because the shape is (1,)
# check that the same behavior occurs even if transform is from low-level classes
icoo = ICRS(sc.data)
icoo2 = ICRS(sc2.data)
assert icoo.transform_to(aa).shape == tuple()
assert icoo2.transform_to(aa).shape == tuple()
def test_regression_3938():
"""
Issue: https://github.com/astropy/astropy/issues/3938
"""
# Set up list of targets - we don't use `from_name` here to avoid
# remote_data requirements, but it does the same thing
# vega = SkyCoord.from_name('Vega')
vega = SkyCoord(279.23473479 * u.deg, 38.78368896 * u.deg)
# capella = SkyCoord.from_name('Capella')
capella = SkyCoord(79.17232794 * u.deg, 45.99799147 * u.deg)
# sirius = SkyCoord.from_name('Sirius')
sirius = SkyCoord(101.28715533 * u.deg, -16.71611586 * u.deg)
targets = [vega, capella, sirius]
# Feed list of targets into SkyCoord
combined_coords = SkyCoord(targets)
# Set up AltAz frame
time = Time("2012-01-01 00:00:00")
location = EarthLocation("10d", "45d", 0)
aa = AltAz(location=location, obstime=time)
combined_coords.transform_to(aa)
# in 3938 the above yields ``UnitConversionError: '' (dimensionless) and 'pc' (length) are not convertible``
def test_regression_3998():
"""
Issue: https://github.com/astropy/astropy/issues/3998
"""
time = Time("2012-01-01 00:00:00")
assert time.isscalar
sun = get_sun(time)
assert sun.isscalar
# in 3998, the above yields False - `sun` is a length-1 vector
assert sun.obstime is time
def test_regression_4033():
"""
Issue: https://github.com/astropy/astropy/issues/4033
"""
# alb = SkyCoord.from_name('Albireo')
alb = SkyCoord(292.68033548 * u.deg, 27.95968007 * u.deg)
alb_wdist = SkyCoord(alb, distance=133 * u.pc)
# de = SkyCoord.from_name('Deneb')
de = SkyCoord(310.35797975 * u.deg, 45.28033881 * u.deg)
de_wdist = SkyCoord(de, distance=802 * u.pc)
aa = AltAz(
location=EarthLocation(lat=45 * u.deg, lon=0 * u.deg), obstime="2010-1-1"
)
deaa = de.transform_to(aa)
albaa = alb.transform_to(aa)
alb_wdistaa = alb_wdist.transform_to(aa)
de_wdistaa = de_wdist.transform_to(aa)
# these work fine
sepnod = deaa.separation(albaa)
sepwd = deaa.separation(alb_wdistaa)
assert_quantity_allclose(sepnod, 22.2862 * u.deg, rtol=1e-6)
assert_quantity_allclose(sepwd, 22.2862 * u.deg, rtol=1e-6)
# parallax should be present when distance added
assert np.abs(sepnod - sepwd) > 1 * u.marcsec
# in 4033, the following fail with a recursion error
assert_quantity_allclose(
de_wdistaa.separation(alb_wdistaa), 22.2862 * u.deg, rtol=1e-3
)
assert_quantity_allclose(alb_wdistaa.separation(deaa), 22.2862 * u.deg, rtol=1e-3)
@pytest.mark.skipif(not HAS_SCIPY, reason="No Scipy")
def test_regression_4082():
"""
Issue: https://github.com/astropy/astropy/issues/4082
"""
from astropy.coordinates import search_around_3d, search_around_sky
cat = SkyCoord([10.076, 10.00455], [18.54746, 18.54896], unit="deg")
search_around_sky(cat[0:1], cat, seplimit=u.arcsec * 60, storekdtree=False)
# in the issue, this raises a TypeError
# also check 3d for good measure, although it's not really affected by this bug directly
cat3d = SkyCoord(
[10.076, 10.00455] * u.deg,
[18.54746, 18.54896] * u.deg,
distance=[0.1, 1.5] * u.kpc,
)
search_around_3d(cat3d[0:1], cat3d, 1 * u.kpc, storekdtree=False)
def test_regression_4210():
"""
Issue: https://github.com/astropy/astropy/issues/4210
Related PR with actual change: https://github.com/astropy/astropy/pull/4211
"""
crd = SkyCoord(0 * u.deg, 0 * u.deg, distance=1 * u.AU)
ecl = crd.geocentricmeanecliptic
# bug was that "lambda", which at the time was the name of the geocentric
# ecliptic longitude, is a reserved keyword. So this just makes sure the
    # new names are all valid
ecl.lon
# and for good measure, check the other ecliptic systems are all the same
# names for their attributes
from astropy.coordinates.builtin_frames import ecliptic
for frame_name in ecliptic.__all__:
eclcls = getattr(ecliptic, frame_name)
eclobj = eclcls(1 * u.deg, 2 * u.deg, 3 * u.AU)
eclobj.lat
eclobj.lon
eclobj.distance
def test_regression_futuretimes_4302():
"""
Checks that an error is not raised for future times not covered by IERS
tables (at least in a simple transform like CIRS->ITRS that simply requires
the UTC<->UT1 conversion).
Relevant comment: https://github.com/astropy/astropy/pull/4302#discussion_r44836531
"""
# this is an ugly hack to get the warning to show up even if it has already
# appeared
from astropy.coordinates.builtin_frames import utils
from astropy.utils.exceptions import AstropyWarning
if hasattr(utils, "__warningregistry__"):
utils.__warningregistry__.clear()
# check that out-of-range warning appears among any other warnings. If
# tests are run with --remote-data then the IERS table will be an instance
# of IERS_Auto which is assured of being "fresh". In this case getting
# times outside the range of the table does not raise an exception. Only
# if using IERS_B (which happens without --remote-data, i.e. for all CI
# testing) do we expect another warning.
if isinstance(iers.earth_orientation_table.get(), iers.IERS_B):
ctx = pytest.warns(
AstropyWarning,
match=r"\(some\) times are outside of range covered by IERS table.*",
)
else:
ctx = nullcontext()
with ctx:
future_time = Time("2511-5-1")
c = CIRS(1 * u.deg, 2 * u.deg, obstime=future_time)
c.transform_to(ITRS(obstime=future_time))
def test_regression_4996():
# this part is the actual regression test
deltat = np.linspace(-12, 12, 1000) * u.hour
times = Time("2012-7-13 00:00:00") + deltat
suncoo = get_sun(times)
assert suncoo.shape == (len(times),)
# and this is an additional test to make sure more complex arrays work
times2 = Time("2012-7-13 00:00:00") + deltat.reshape(10, 20, 5)
suncoo2 = get_sun(times2)
assert suncoo2.shape == times2.shape
# this is intentionally not allclose - they should be *exactly* the same
assert np.all(suncoo.ra.ravel() == suncoo2.ra.ravel())
def test_regression_4293():
"""Really just an extra test on FK4 no e, after finding that the units
were not always taken correctly. This test is against explicitly doing
the transformations on pp170 of Explanatory Supplement to the Astronomical
Almanac (Seidelmann, 2005).
See https://github.com/astropy/astropy/pull/4293#issuecomment-234973086
"""
# Check all over sky, but avoiding poles (note that FK4 did not ignore
    # e terms within 10° of the poles... see p170 of explan.supp.).
ra, dec = np.meshgrid(np.arange(0, 359, 45), np.arange(-80, 81, 40))
fk4 = FK4(ra.ravel() * u.deg, dec.ravel() * u.deg)
Dc = -0.065838 * u.arcsec
Dd = +0.335299 * u.arcsec
# Dc * tan(obliquity), as given on p.170
Dctano = -0.028553 * u.arcsec
fk4noe_dec = (
fk4.dec
- (Dd * np.cos(fk4.ra) - Dc * np.sin(fk4.ra)) * np.sin(fk4.dec)
- Dctano * np.cos(fk4.dec)
)
fk4noe_ra = fk4.ra - (Dc * np.cos(fk4.ra) + Dd * np.sin(fk4.ra)) / np.cos(fk4.dec)
fk4noe = fk4.transform_to(FK4NoETerms())
# Tolerance here just set to how well the coordinates match, which is much
# better than the claimed accuracy of <1 mas for this first-order in
# v_earth/c approximation.
# Interestingly, if one divides by np.cos(fk4noe_dec) in the ra correction,
# the match becomes good to 2 μas.
assert_quantity_allclose(fk4noe.ra, fk4noe_ra, atol=11.0 * u.uas, rtol=0)
assert_quantity_allclose(fk4noe.dec, fk4noe_dec, atol=3.0 * u.uas, rtol=0)
def test_regression_4926():
times = Time("2010-01-1") + np.arange(20) * u.day
green = get_builtin_sites()["greenwich"]
# this is the regression test
moon = get_moon(times, green)
# this is an additional test to make sure the GCRS->ICRS transform works for complex shapes
moon.transform_to(ICRS())
# and some others to increase coverage of transforms
moon.transform_to(HCRS(obstime="J2000"))
moon.transform_to(HCRS(obstime=times))
def test_regression_5209():
"check that distances are not lost on SkyCoord init"
time = Time("2015-01-01")
moon = get_moon(time)
new_coord = SkyCoord([moon])
assert_quantity_allclose(new_coord[0].distance, moon.distance)
def test_regression_5133():
N = 1000
np.random.seed(12345)
lon = np.random.uniform(-10, 10, N) * u.deg
lat = np.random.uniform(50, 52, N) * u.deg
alt = np.random.uniform(0, 10.0, N) * u.km
time = Time("2010-1-1")
objects = EarthLocation.from_geodetic(lon, lat, height=alt)
itrs_coo = objects.get_itrs(time)
homes = [
EarthLocation.from_geodetic(lon=-1 * u.deg, lat=52 * u.deg, height=h)
for h in (0, 1000, 10000) * u.km
]
altaz_frames = [AltAz(obstime=time, location=h) for h in homes]
altaz_coos = [itrs_coo.transform_to(f) for f in altaz_frames]
# they should all be different
for coo in altaz_coos[1:]:
assert not quantity_allclose(coo.az, coo.az[0])
assert not quantity_allclose(coo.alt, coo.alt[0])
def test_itrs_vals_5133():
"""
Test to check if alt-az calculations respect height of observer
Because ITRS is geocentric and includes aberration, an object that
appears 'straight up' to a geocentric observer (ITRS) won't be
straight up to a topocentric observer - see
https://github.com/astropy/astropy/issues/10983
This is worse for small height above the Earth, which is why this test
uses large distances.
"""
time = Time("2010-1-1")
height = 500000.0 * u.km
el = EarthLocation.from_geodetic(lon=20 * u.deg, lat=45 * u.deg, height=height)
lons = [20, 30, 20] * u.deg
lats = [44, 45, 45] * u.deg
alts = u.Quantity([height, height, 10 * height])
coos = [
EarthLocation.from_geodetic(lon, lat, height=alt).get_itrs(time)
for lon, lat, alt in zip(lons, lats, alts)
]
aaf = AltAz(obstime=time, location=el)
aacs = [coo.transform_to(aaf) for coo in coos]
assert all([coo.isscalar for coo in aacs])
# the ~1 degree tolerance is b/c aberration makes it not exact
assert_quantity_allclose(aacs[0].az, 180 * u.deg, atol=1 * u.deg)
assert aacs[0].alt < 0 * u.deg
assert aacs[0].distance > 5000 * u.km
# it should *not* actually be 90 degrees, b/c constant latitude is not
# straight east anywhere except the equator... but should be close-ish
assert_quantity_allclose(aacs[1].az, 90 * u.deg, atol=5 * u.deg)
assert aacs[1].alt < 0 * u.deg
assert aacs[1].distance > 5000 * u.km
assert_quantity_allclose(aacs[2].alt, 90 * u.deg, atol=1 * u.arcminute)
assert_quantity_allclose(aacs[2].distance, 9 * height)
def test_regression_simple_5133():
"""
Simple test to check if alt-az calculations respect height of observer
Because ITRS is geocentric and includes aberration, an object that
appears 'straight up' to a geocentric observer (ITRS) won't be
straight up to a topocentric observer - see
https://github.com/astropy/astropy/issues/10983
This is why we construct a topocentric GCRS SkyCoord before calculating AltAz
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=[10.0, 0.0] * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=5.0 * u.km)
obsloc_gcrs, obsvel_gcrs = home.get_gcrs_posvel(t)
gcrs_geo = obj.get_itrs(t).transform_to(GCRS(obstime=t))
obsrepr = home.get_itrs(t).transform_to(GCRS(obstime=t)).cartesian
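    # Subtracting the observer's own geocentric position leaves the topocentric
    # offset, which is then realized in a GCRS frame centred on the observer.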
topo_gcrs_repr = gcrs_geo.cartesian - obsrepr
topocentric_gcrs_frame = GCRS(
obstime=t, obsgeoloc=obsloc_gcrs, obsgeovel=obsvel_gcrs
)
gcrs_topo = topocentric_gcrs_frame.realize_frame(topo_gcrs_repr)
aa = gcrs_topo.transform_to(AltAz(obstime=t, location=home))
# az is more-or-less undefined for straight up or down
assert_quantity_allclose(aa.alt, [90, -90] * u.deg, rtol=1e-7)
assert_quantity_allclose(aa.distance, 5 * u.km)
def test_regression_5743():
sc = SkyCoord(
[5, 10], [20, 30], unit=u.deg, obstime=["2017-01-01T00:00", "2017-01-01T00:10"]
)
assert sc[0].obstime.shape == tuple()
def test_regression_5889_5890():
# ensure we can represent all Representations and transform to ND frames
greenwich = EarthLocation(
*u.Quantity([3980608.90246817, -102.47522911, 4966861.27310067], unit=u.m)
)
times = Time("2017-03-20T12:00:00") + np.linspace(-2, 2, 3) * u.hour
moon = get_moon(times, location=greenwich)
targets = SkyCoord([350.7 * u.deg, 260.7 * u.deg], [18.4 * u.deg, 22.4 * u.deg])
targs2d = targets[:, np.newaxis]
targs2d.transform_to(moon)
def test_regression_6236():
# sunpy changes its representation upon initialisation of a frame,
# including via `realize_frame`. Ensure this works.
class MyFrame(BaseCoordinateFrame):
default_representation = CartesianRepresentation
my_attr = QuantityAttribute(default=0, unit=u.m)
class MySpecialFrame(MyFrame):
def __init__(self, *args, **kwargs):
_rep_kwarg = kwargs.get("representation_type", None)
super().__init__(*args, **kwargs)
if not _rep_kwarg:
self.representation_type = self.default_representation
self._data = self.data.represent_as(self.representation_type)
rep1 = UnitSphericalRepresentation([0.0, 1] * u.deg, [2.0, 3.0] * u.deg)
rep2 = SphericalRepresentation(
[10.0, 11] * u.deg, [12.0, 13.0] * u.deg, [14.0, 15.0] * u.kpc
)
mf1 = MyFrame(rep1, my_attr=1.0 * u.km)
mf2 = mf1.realize_frame(rep2)
# Normally, data is stored as is, but the representation gets set to a
# default, even if a different representation instance was passed in.
# realize_frame should do the same. Just in case, check attrs are passed.
assert mf1.data is rep1
assert mf2.data is rep2
assert mf1.representation_type is CartesianRepresentation
assert mf2.representation_type is CartesianRepresentation
assert mf2.my_attr == mf1.my_attr
# It should be independent of whether I set the representation explicitly
mf3 = MyFrame(rep1, my_attr=1.0 * u.km, representation_type="unitspherical")
mf4 = mf3.realize_frame(rep2)
assert mf3.data is rep1
assert mf4.data is rep2
assert mf3.representation_type is UnitSphericalRepresentation
assert mf4.representation_type is CartesianRepresentation
assert mf4.my_attr == mf3.my_attr
# This should be enough to help sunpy, but just to be sure, a test
# even closer to what is done there, i.e., transform the representation.
msf1 = MySpecialFrame(rep1, my_attr=1.0 * u.km)
msf2 = msf1.realize_frame(rep2)
assert msf1.data is not rep1 # Gets transformed to Cartesian.
assert msf2.data is not rep2
assert type(msf1.data) is CartesianRepresentation
assert type(msf2.data) is CartesianRepresentation
assert msf1.representation_type is CartesianRepresentation
assert msf2.representation_type is CartesianRepresentation
assert msf2.my_attr == msf1.my_attr
# And finally a test where the input is not transformed.
msf3 = MySpecialFrame(rep1, my_attr=1.0 * u.km, representation_type="unitspherical")
msf4 = msf3.realize_frame(rep2)
assert msf3.data is rep1
assert msf4.data is not rep2
assert msf3.representation_type is UnitSphericalRepresentation
assert msf4.representation_type is CartesianRepresentation
assert msf4.my_attr == msf3.my_attr
@pytest.mark.skipif(not HAS_SCIPY, reason="No Scipy")
def test_regression_6347():
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc2 = SkyCoord([1.1, 2.1] * u.deg, [3.1, 4.1] * u.deg)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_sky(sc2, 10 * u.arcmin)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_sky(sc2, 1 * u.arcmin)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_sky(sc2, 10 * u.arcmin)
assert len(d2d_10) == 2
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
@pytest.mark.skipif(not HAS_SCIPY, reason="No Scipy")
def test_regression_6347_3d():
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, [5, 6] * u.kpc)
sc2 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, [5.1, 6.1] * u.kpc)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_3d(sc2, 500 * u.pc)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_3d(sc2, 50 * u.pc)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_3d(sc2, 500 * u.pc)
assert len(d2d_10) > 0
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
def test_gcrs_itrs_cartesian_repr():
# issue 6436: transformation failed if coordinate representation was
# Cartesian
gcrs = GCRS(
CartesianRepresentation((859.07256, -4137.20368, 5295.56871), unit="km"),
representation_type="cartesian",
)
gcrs.transform_to(ITRS())
def test_regression_6446():
# this succeeds even before 6446:
sc1 = SkyCoord([1, 2], [3, 4], unit="deg")
t1 = Table([sc1])
sio1 = io.StringIO()
t1.write(sio1, format="ascii.ecsv")
# but this fails due to the 6446 bug
c1 = SkyCoord(1, 3, unit="deg")
c2 = SkyCoord(2, 4, unit="deg")
sc2 = SkyCoord([c1, c2])
t2 = Table([sc2])
sio2 = io.StringIO()
t2.write(sio2, format="ascii.ecsv")
assert sio1.getvalue() == sio2.getvalue()
def test_regression_6597():
frame_name = "galactic"
c1 = SkyCoord(1, 3, unit="deg", frame=frame_name)
c2 = SkyCoord(2, 4, unit="deg", frame=frame_name)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame_name
def test_regression_6597_2():
"""
This tests the more subtle flaw that #6597 indirectly uncovered: that even
in the case that the frames are ra/dec, they still might be the wrong *kind*
"""
frame = FK4(equinox="J1949")
c1 = SkyCoord(1, 3, unit="deg", frame=frame)
c2 = SkyCoord(2, 4, unit="deg", frame=frame)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame.name
def test_regression_6697():
"""
Test for regression of a bug in get_gcrs_posvel that introduced errors at the 1m/s level.
Comparison data is derived from calculation in PINT
https://github.com/nanograv/PINT/blob/master/pint/erfautils.py
"""
pint_vels = CartesianRepresentation(
348.63632871, -212.31704928, -0.60154936, unit=u.m / u.s
)
location = EarthLocation(
5327448.9957829, -1718665.73869569, 3051566.90295403, unit=u.m
)
t = Time(2458036.161966612, format="jd")
obsgeopos, obsgeovel = location.get_gcrs_posvel(t)
delta = (obsgeovel - pint_vels).norm()
assert delta < 1 * u.cm / u.s
def test_regression_8138():
sc = SkyCoord(1 * u.deg, 2 * u.deg)
newframe = GCRS()
sc2 = sc.transform_to(newframe)
assert newframe.is_equivalent_frame(sc2.frame)
def test_regression_8276():
from astropy.coordinates import baseframe
class MyFrame(BaseCoordinateFrame):
a = QuantityAttribute(unit=u.m)
# we save the transform graph so that it doesn't accidentally mess with other tests
old_transform_graph = baseframe.frame_transform_graph
try:
baseframe.frame_transform_graph = copy.copy(baseframe.frame_transform_graph)
# as reported in 8276, this previously failed right here because
# registering the transform tries to create a frame attribute
@baseframe.frame_transform_graph.transform(FunctionTransform, MyFrame, AltAz)
def trans(my_frame_coord, altaz_frame):
pass
# should also be able to *create* the Frame at this point
MyFrame()
finally:
baseframe.frame_transform_graph = old_transform_graph
def test_regression_8615():
# note this is a "higher-level" symptom of the problem that a test now moved
# to pyerfa (erfa/tests/test_erfa:test_float32_input) is testing for, but we keep
# it here as well due to being a more practical version of the issue.
crf = CartesianRepresentation(np.array([3, 0, 4], dtype=float) * u.pc)
srf = SphericalRepresentation.from_cartesian(crf) # does not error in 8615
cr = CartesianRepresentation(np.array([3, 0, 4], dtype="f4") * u.pc)
sr = SphericalRepresentation.from_cartesian(cr) # errors in 8615
assert_quantity_allclose(sr.distance, 5 * u.pc)
assert_quantity_allclose(srf.distance, 5 * u.pc)
def test_regression_8924():
"""This checks that the ValueError in
BaseRepresentation._re_represent_differentials is raised properly
"""
# A case where the representation has a 's' differential, but we try to
# re-represent only with an 's2' differential
rep = CartesianRepresentation(1, 2, 3, unit=u.kpc)
dif = CartesianDifferential(4, 5, 6, u.km / u.s)
rep = rep.with_differentials(dif)
with pytest.raises(ValueError):
rep._re_represent_differentials(
CylindricalRepresentation, {"s2": CylindricalDifferential}
)
def test_regression_10092():
"""
Check that we still get a proper motion even for SkyCoords without distance
"""
c = SkyCoord(
l=10 * u.degree,
b=45 * u.degree,
pm_l_cosb=34 * u.mas / u.yr,
pm_b=-117 * u.mas / u.yr,
frame="galactic",
obstime=Time("1988-12-18 05:11:23.5"),
)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
newc = c.apply_space_motion(dt=10 * u.year)
assert_quantity_allclose(
newc.pm_l_cosb, 33.99980714 * u.mas / u.yr, atol=1.0e-5 * u.mas / u.yr
)
def test_regression_10226():
# Dictionary representation of SkyCoord should contain differentials.
sc = SkyCoord(
[270, 280] * u.deg,
[30, 35] * u.deg,
[10, 11] * u.pc,
radial_velocity=[20, -20] * u.km / u.s,
)
sc_as_dict = sc.info._represent_as_dict()
assert "radial_velocity" in sc_as_dict
# But only the components that have been specified.
assert "pm_dec" not in sc_as_dict
@pytest.mark.parametrize(
"mjd", (52000, [52000], [[52000]], [52001, 52002], [[52001], [52002]])
)
def test_regression_10422(mjd):
"""
    Check that we can get a GCRS for a scalar EarthLocation and Times of
    various shapes (scalar, size-1, and larger arrays).
"""
# Avoid trying to download new IERS data.
with iers.earth_orientation_table.set(iers.IERS_B.open(iers.IERS_B_FILE)):
t = Time(mjd, format="mjd", scale="tai")
loc = EarthLocation(88258.0 * u.m, -4924882.2 * u.m, 3943729.0 * u.m)
p, v = loc.get_gcrs_posvel(obstime=t)
assert p.shape == v.shape == t.shape
@pytest.mark.remote_data
def test_regression_10291():
"""
According to https://eclipse.gsfc.nasa.gov/OH/transit12.html,
the minimum separation between Venus and the Sun during the 2012
transit is 554 arcseconds for an observer at the Geocenter.
If light deflection from the Sun is incorrectly applied, this increases
to 557 arcseconds.
"""
t = Time("2012-06-06 01:29:36")
sun = get_body("sun", t)
venus = get_body("venus", t)
assert_quantity_allclose(
venus.separation(sun), 554.427 * u.arcsecond, atol=0.001 * u.arcsecond
)
|
7d1f00f9d124edc42f4b6a5d0a3de03c17810716114bc29711bd905d8a615bdc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the SkyCoord class. Note that there are also SkyCoord tests in
test_api_ape5.py
"""
import copy
from copy import deepcopy
import numpy as np
import numpy.testing as npt
import pytest
from erfa import ErfaWarning
from astropy import units as u
from astropy.coordinates import (
FK4,
FK5,
GCRS,
ICRS,
AltAz,
Angle,
Attribute,
BaseCoordinateFrame,
CartesianRepresentation,
EarthLocation,
Galactic,
Latitude,
RepresentationMapping,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
frame_transform_graph,
)
from astropy.coordinates.representation import (
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
)
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.coordinates.transformations import FunctionTransform
from astropy.io import fits
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils import isiterable
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.wcs import WCS
RA = 1.0 * u.deg
DEC = 2.0 * u.deg
C_ICRS = ICRS(RA, DEC)
C_FK5 = C_ICRS.transform_to(FK5())
J2001 = Time("J2001")
def allclose(a, b, rtol=0.0, atol=None):
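    # Default to an absolute tolerance of 1e-8 in the units of ``a``
    # (dimensionless if ``a`` has no unit).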
if atol is None:
atol = 1.0e-8 * getattr(a, "unit", 1.0)
return quantity_allclose(a, b, rtol, atol)
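# Save and restore the global representation registries around each test so
# that tests registering custom representations do not leak state.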
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
def test_is_transformable_to_str_input():
"""Test method ``is_transformable_to`` with string input.
The only difference from the frame method of the same name is that
    strings are allowed. As the frame tests cover ``is_transformable_to``, here
we only test the added string option.
"""
# make example SkyCoord
c = SkyCoord(90 * u.deg, -11 * u.deg)
# iterate through some frames, checking consistency
names = frame_transform_graph.get_names()
for name in names:
frame = frame_transform_graph.lookup_name(name)()
assert c.is_transformable_to(name) == c.is_transformable_to(frame)
def test_transform_to():
for frame in (
FK5(),
FK5(equinox=Time("J1975.0")),
FK4(),
FK4(equinox=Time("J1975.0")),
SkyCoord(RA, DEC, frame="fk4", equinox="J1980"),
):
c_frame = C_ICRS.transform_to(frame)
s_icrs = SkyCoord(RA, DEC, frame="icrs")
s_frame = s_icrs.transform_to(frame)
assert allclose(c_frame.ra, s_frame.ra)
assert allclose(c_frame.dec, s_frame.dec)
assert allclose(c_frame.distance, s_frame.distance)
# set up for parametrized test
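# Build every combination of two frame classes with optional equinox and
# obstime values for the round-trip test below.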
rt_sets = []
rt_frames = [ICRS, FK4, FK5, Galactic]
for rt_frame0 in rt_frames:
for rt_frame1 in rt_frames:
for equinox0 in (None, "J1975.0"):
for obstime0 in (None, "J1980.0"):
for equinox1 in (None, "J1975.0"):
for obstime1 in (None, "J1980.0"):
rt_sets.append(
(
rt_frame0,
rt_frame1,
equinox0,
equinox1,
obstime0,
obstime1,
)
)
rt_args = ("frame0", "frame1", "equinox0", "equinox1", "obstime0", "obstime1")
@pytest.mark.parametrize(rt_args, rt_sets)
def test_round_tripping(frame0, frame1, equinox0, equinox1, obstime0, obstime1):
"""
Test round tripping out and back using transform_to in every combination.
"""
attrs0 = {"equinox": equinox0, "obstime": obstime0}
attrs1 = {"equinox": equinox1, "obstime": obstime1}
# Remove None values
attrs0 = {k: v for k, v in attrs0.items() if v is not None}
attrs1 = {k: v for k, v in attrs1.items() if v is not None}
# Go out and back
sc = SkyCoord(RA, DEC, frame=frame0, **attrs0)
# Keep only frame attributes for frame1
attrs1 = {
attr: val for attr, val in attrs1.items() if attr in frame1.frame_attributes
}
sc2 = sc.transform_to(frame1(**attrs1))
# When coming back only keep frame0 attributes for transform_to
attrs0 = {
attr: val for attr, val in attrs0.items() if attr in frame0.frame_attributes
}
# also, if any are None, fill in with defaults
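    # (an FK4-style frame with no obstime effectively defaults it to the equinox)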
for attrnm in frame0.frame_attributes:
if attrs0.get(attrnm, None) is None:
if attrnm == "obstime" and frame0.get_frame_attr_defaults()[attrnm] is None:
if "equinox" in attrs0:
attrs0[attrnm] = attrs0["equinox"]
else:
attrs0[attrnm] = frame0.get_frame_attr_defaults()[attrnm]
sc_rt = sc2.transform_to(frame0(**attrs0))
if frame0 is Galactic:
assert allclose(sc.l, sc_rt.l)
assert allclose(sc.b, sc_rt.b)
else:
assert allclose(sc.ra, sc_rt.ra)
assert allclose(sc.dec, sc_rt.dec)
if equinox0:
assert type(sc.equinox) is Time and sc.equinox == sc_rt.equinox
if obstime0:
assert type(sc.obstime) is Time and sc.obstime == sc_rt.obstime
def test_coord_init_string():
"""
    Test initializing from coordinate strings in various formats.
"""
sc = SkyCoord("1d 2d")
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord("1d", "2d")
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord("1°2′3″", "2°3′4″")
assert allclose(sc.ra, Angle("1°2′3″"))
assert allclose(sc.dec, Angle("2°3′4″"))
sc = SkyCoord("1°2′3″ 2°3′4″")
assert allclose(sc.ra, Angle("1°2′3″"))
assert allclose(sc.dec, Angle("2°3′4″"))
with pytest.raises(ValueError) as err:
SkyCoord("1d 2d 3d")
assert "Cannot parse first argument data" in str(err.value)
sc1 = SkyCoord("8 00 00 +5 00 00.0", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc1, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc11 = SkyCoord("8h00m00s+5d00m00.0s", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc11, SkyCoord)
    assert allclose(sc11.ra, Angle(120 * u.deg))
    assert allclose(sc11.dec, Angle(5 * u.deg))
sc2 = SkyCoord("8 00 -5 00 00.0", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc2, SkyCoord)
assert allclose(sc2.ra, Angle(120 * u.deg))
assert allclose(sc2.dec, Angle(-5 * u.deg))
sc3 = SkyCoord("8 00 -5 00.6", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc3, SkyCoord)
assert allclose(sc3.ra, Angle(120 * u.deg))
assert allclose(sc3.dec, Angle(-5.01 * u.deg))
sc4 = SkyCoord("J080000.00-050036.00", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc4, SkyCoord)
assert allclose(sc4.ra, Angle(120 * u.deg))
assert allclose(sc4.dec, Angle(-5.01 * u.deg))
sc41 = SkyCoord("J080000+050036", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc41, SkyCoord)
assert allclose(sc41.ra, Angle(120 * u.deg))
assert allclose(sc41.dec, Angle(+5.01 * u.deg))
sc5 = SkyCoord("8h00.6m -5d00.6m", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc5, SkyCoord)
assert allclose(sc5.ra, Angle(120.15 * u.deg))
assert allclose(sc5.dec, Angle(-5.01 * u.deg))
sc6 = SkyCoord("8h00.6m -5d00.6m", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc6, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord("8h00.6m-5d00.6m", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc61, SkyCoord)
    assert allclose(sc61.ra, Angle(120.15 * u.deg))
    assert allclose(sc61.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord("8h00.6-5d00.6", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc61, SkyCoord)
    assert allclose(sc61.ra, Angle(120.15 * u.deg))
    assert allclose(sc61.dec, Angle(-5.01 * u.deg))
sc7 = SkyCoord("J1874221.60+122421.6", unit=u.deg)
assert isinstance(sc7, SkyCoord)
assert allclose(sc7.ra, Angle(187.706 * u.deg))
assert allclose(sc7.dec, Angle(12.406 * u.deg))
with pytest.raises(ValueError):
SkyCoord("8 00 -5 00.6", unit=(u.deg, u.deg), frame="galactic")
def test_coord_init_unit():
"""
Test variations of the unit keyword.
"""
for unit in (
"deg",
"deg,deg",
" deg , deg ",
u.deg,
(u.deg, u.deg),
np.array(["deg", "deg"]),
):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(1 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in (
"hourangle",
"hourangle,hourangle",
" hourangle , hourangle ",
u.hourangle,
[u.hourangle, u.hourangle],
):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(30 * u.deg))
for unit in ("hourangle,deg", (u.hourangle, u.deg)):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ("deg,deg,deg,deg", [u.deg, u.deg, u.deg, u.deg], None):
with pytest.raises(ValueError) as err:
SkyCoord(1, 2, unit=unit)
assert "Unit keyword must have one to three unit values" in str(err.value)
for unit in ("m", (u.m, u.deg), ""):
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, unit=unit)
def test_coord_init_list():
"""
    Test initializing from lists of coordinate strings and quantities.
"""
sc = SkyCoord(
[("1d", "2d"), (1 * u.deg, 2 * u.deg), "1d 2d", ("1°", "2°"), "1° 2°"],
unit="deg",
)
assert allclose(sc.ra, Angle("1d"))
assert allclose(sc.dec, Angle("2d"))
with pytest.raises(ValueError) as err:
SkyCoord(["1d 2d 3d"])
assert "Cannot parse first argument data" in str(err.value)
with pytest.raises(ValueError) as err:
SkyCoord([("1d", "2d", "3d")])
assert "Cannot parse first argument data" in str(err.value)
sc = SkyCoord([1 * u.deg, 1 * u.deg], [2 * u.deg, 2 * u.deg])
assert allclose(sc.ra, Angle("1d"))
assert allclose(sc.dec, Angle("2d"))
with pytest.raises(
ValueError,
match="One or more elements of input sequence does not have a length",
):
SkyCoord([1 * u.deg, 2 * u.deg]) # this list is taken as RA w/ missing dec
def test_coord_init_array():
"""
    Input in the form of a list, a nested list, or a numpy array.
"""
for a in (["1 2", "3 4"], [["1", "2"], ["3", "4"]], [[1, 2], [3, 4]]):
sc = SkyCoord(a, unit="deg")
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
sc = SkyCoord(np.array(a), unit="deg")
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
def test_coord_init_representation():
"""
Spherical or Cartesian representation input coordinates.
"""
coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc)
sc = SkyCoord(coord, frame="icrs")
assert allclose(sc.ra, coord.lon)
assert allclose(sc.dec, coord.lat)
assert allclose(sc.distance, coord.distance)
with pytest.raises(ValueError) as err:
SkyCoord(coord, frame="icrs", ra="1d")
assert "conflicts with keyword argument 'ra'" in str(err.value)
coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one)
sc = SkyCoord(coord, frame="icrs")
sc_cart = sc.represent_as(CartesianRepresentation)
assert allclose(sc_cart.x, 1.0)
assert allclose(sc_cart.y, 2.0)
assert allclose(sc_cart.z, 3.0)
def test_frame_init():
"""
Different ways of providing the frame.
"""
sc = SkyCoord(RA, DEC, frame="icrs")
assert sc.frame.name == "icrs"
sc = SkyCoord(RA, DEC, frame=ICRS)
assert sc.frame.name == "icrs"
sc = SkyCoord(sc)
assert sc.frame.name == "icrs"
sc = SkyCoord(C_ICRS)
assert sc.frame.name == "icrs"
SkyCoord(C_ICRS, frame="icrs")
assert sc.frame.name == "icrs"
with pytest.raises(ValueError) as err:
SkyCoord(C_ICRS, frame="galactic")
assert "Cannot override frame=" in str(err.value)
def test_equal():
obstime = "B1955"
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = SkyCoord([1, 20] * u.deg, [3, 4] * u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 2] * u.km / u.s)
sc2 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 20] * u.km / u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
def test_equal_different_type():
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime="B1955")
# Test equals and not equals operators against different types
assert sc1 != "a string"
assert not (sc1 == "a string")
def test_equal_exceptions():
sc1 = SkyCoord(1 * u.deg, 2 * u.deg, obstime="B1955")
sc2 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(
ValueError,
match=(
"cannot compare: extra frame attribute 'obstime' is not equivalent"
r" \(perhaps compare the frames directly to avoid this exception\)"
),
):
sc1 == sc2
# Note that this exception is the only one raised directly in SkyCoord.
# All others come from lower-level classes and are tested in test_frames.py.
def test_attr_inheritance():
"""
When initializing from an existing coord the representation attrs like
equinox should be inherited to the SkyCoord. If there is a conflict
then raise an exception.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # Doesn't have equinox there so we get FK4 defaults
assert sc2.equinox != sc.equinox
assert sc2.obstime != sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # sc.frame has equinox, obstime
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
@pytest.mark.parametrize("frame", ["fk4", "fk5", "icrs"])
def test_setitem_no_velocity(frame):
"""Test different flavors of item setting for a SkyCoord without a velocity
for different frames. Include a frame attribute that is sometimes an
actual frame attribute and sometimes an extra frame attribute.
"""
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime="B1955", frame=frame)
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg, obstime="B1955", frame=frame)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == Time("B1955")
assert sc1.frame.name == frame
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
def test_setitem_initially_broadcast():
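    # The (2, 1) and (1, 3) inputs broadcast to a (2, 3) coordinate; setting a
    # single element should still work on the initially broadcast (read-only) data.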
sc = SkyCoord(np.ones((2, 1)) * u.deg, np.ones((1, 3)) * u.deg)
sc[1, 1] = SkyCoord(0 * u.deg, 0 * u.deg)
expected = np.ones((2, 3)) * u.deg
expected[1, 1] = 0.0
assert np.all(sc.ra == expected)
assert np.all(sc.dec == expected)
def test_setitem_velocities():
"""Test different flavors of item setting for a SkyCoord with a velocity."""
sc0 = SkyCoord(
[1, 2] * u.deg,
[3, 4] * u.deg,
radial_velocity=[1, 2] * u.km / u.s,
obstime="B1950",
frame="fk4",
)
sc2 = SkyCoord(
[10, 20] * u.deg,
[30, 40] * u.deg,
radial_velocity=[10, 20] * u.km / u.s,
obstime="B1950",
frame="fk4",
)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == Time("B1950")
assert sc1.frame.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
class SkyCoordSub(SkyCoord):
pass
obstime = "B1955"
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, frame="fk4")
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg, frame="fk4", obstime=obstime)
sc1 = SkyCoordSub(sc0)
with pytest.raises(
TypeError,
match="an only set from object of same class: SkyCoordSub vs. SkyCoord",
):
sc1[0] = sc2[0]
sc1 = SkyCoord(sc0.ra, sc0.dec, frame="fk4", obstime="B2001")
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1.frame[0] = sc2.frame[0]
sc1 = SkyCoord(sc0.ra[0], sc0.dec[0], frame="fk4", obstime=obstime)
with pytest.raises(
TypeError, match="scalar 'FK4' frame object does not support item assignment"
):
sc1[0] = sc2[0]
# Different differentials
sc1 = SkyCoord(
[1, 2] * u.deg,
[3, 4] * u.deg,
pm_ra_cosdec=[1, 2] * u.mas / u.yr,
pm_dec=[3, 4] * u.mas / u.yr,
)
sc2 = SkyCoord(
[10, 20] * u.deg, [30, 40] * u.deg, radial_velocity=[10, 20] * u.km / u.s
)
with pytest.raises(
TypeError,
match=(
"can only set from object of same class: "
"UnitSphericalCosLatDifferential vs. RadialDifferential"
),
):
sc1[0] = sc2[0]
def test_insert():
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc1 = SkyCoord(5 * u.deg, 6 * u.deg)
sc3 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
sc5 = SkyCoord([[10, 2], [30, 4]] * u.deg, [[50, 6], [70, 8]] * u.deg)
# Insert a scalar
sc = sc0.insert(1, sc1)
assert skycoord_equal(sc, SkyCoord([1, 5, 2] * u.deg, [3, 6, 4] * u.deg))
# Insert length=2 array at start of array
sc = sc0.insert(0, sc3)
assert skycoord_equal(sc, SkyCoord([10, 20, 1, 2] * u.deg, [30, 40, 3, 4] * u.deg))
# Insert length=2 array at end of array
sc = sc0.insert(2, sc3)
assert skycoord_equal(sc, SkyCoord([1, 2, 10, 20] * u.deg, [3, 4, 30, 40] * u.deg))
# Multidimensional
sc = sc4.insert(1, sc5)
assert skycoord_equal(
sc,
SkyCoord(
[[1, 2], [10, 2], [30, 4], [3, 4]] * u.deg,
[[5, 6], [50, 6], [70, 8], [7, 8]] * u.deg,
),
)
def test_insert_exceptions():
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc1 = SkyCoord(5 * u.deg, 6 * u.deg)
# sc3 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
with pytest.raises(TypeError, match="cannot insert into scalar"):
sc1.insert(0, sc0)
with pytest.raises(ValueError, match="axis must be 0"):
sc0.insert(0, sc1, axis=1)
with pytest.raises(TypeError, match="obj arg must be an integer"):
sc0.insert(slice(None), sc0)
with pytest.raises(
IndexError, match="index -100 is out of bounds for axis 0 with size 2"
):
sc0.insert(-100, sc0)
# Bad shape
with pytest.raises(
ValueError,
match=r"could not broadcast input array from shape \(2,2\) into shape \(2,?\)",
):
sc0.insert(0, sc4)
def test_attr_conflicts():
"""
Check conflicts resolution between coordinate attributes and init kwargs.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox="J1999", obstime="J2001")
# OK because sc.frame doesn't have obstime
SkyCoord(sc.frame, equinox="J1999", obstime="J2100")
# Not OK if attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox="J1999", obstime="J2002")
assert "Coordinate attribute 'obstime'=" in str(err.value)
# Same game but with fk4 which has equinox and obstime frame attrs
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox="J1999", obstime="J2001")
# Not OK if SkyCoord attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox="J1999", obstime="J2002")
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
# Not OK because sc.frame has different attrs
with pytest.raises(ValueError) as err:
SkyCoord(sc.frame, equinox="J1999", obstime="J2002")
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
def test_frame_attr_getattr():
"""
When accessing frame attributes like equinox, the value should come
from self.frame when that object has the relevant attribute, otherwise
from self.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
assert sc.equinox == "J1999" # Just the raw value (not validated)
assert sc.obstime == "J2001"
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
assert sc.equinox == Time("J1999") # Coming from the self.frame object
assert sc.obstime == Time("J2001")
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999")
assert sc.equinox == Time("J1999")
assert sc.obstime == Time("J1999")
def test_to_string():
"""
Basic testing of converting SkyCoord to strings. This just tests
    for a single input coordinate and a 1-element list. It does not
test the underlying `Angle.to_string` method itself.
"""
coord = "1h2m3s 1d2m3s"
for wrap in (lambda x: x, lambda x: [x]):
sc = SkyCoord(wrap(coord))
assert sc.to_string() == wrap("15.5125 1.03417")
assert sc.to_string("dms") == wrap("15d30m45s 1d02m03s")
assert sc.to_string("hmsdms") == wrap("01h02m03s +01d02m03s")
with_kwargs = sc.to_string("hmsdms", precision=3, pad=True, alwayssign=True)
assert with_kwargs == wrap("+01h02m03.000s +01d02m03.000s")
@pytest.mark.parametrize("cls_other", [SkyCoord, ICRS])
def test_seps(cls_other):
sc1 = SkyCoord(0 * u.deg, 1 * u.deg)
sc2 = cls_other(0 * u.deg, 2 * u.deg)
sep = sc1.separation(sc2)
assert (sep - 1 * u.deg) / u.deg < 1e-10
with pytest.raises(ValueError):
sc1.separation_3d(sc2)
sc3 = SkyCoord(1 * u.deg, 1 * u.deg, distance=1 * u.kpc)
sc4 = cls_other(1 * u.deg, 1 * u.deg, distance=2 * u.kpc)
sep3d = sc3.separation_3d(sc4)
assert sep3d == 1 * u.kpc
def test_repr():
sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame="icrs")
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame="icrs", distance=1 * u.kpc)
assert repr(sc1) == "<SkyCoord (ICRS): (ra, dec) in deg\n (0., 1.)>"
assert (
repr(sc2)
== "<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)\n (1., 1., 1.)>"
)
sc3 = SkyCoord(0.25 * u.deg, [1, 2.5] * u.deg, frame="icrs")
assert repr(sc3).startswith("<SkyCoord (ICRS): (ra, dec) in deg\n")
sc_default = SkyCoord(0 * u.deg, 1 * u.deg)
assert repr(sc_default) == "<SkyCoord (ICRS): (ra, dec) in deg\n (0., 1.)>"
def test_repr_altaz():
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame="icrs", distance=1 * u.kpc)
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time("2005-03-21 00:00:00")
sc4 = sc2.transform_to(AltAz(location=loc, obstime=time))
assert repr(sc4).startswith(
"<SkyCoord (AltAz: obstime=2005-03-21 00:00:00.000, "
"location=(-2309223., -3695529., -4641767.) m, pressure=0.0 hPa, "
"temperature=0.0 deg_C, relative_humidity=0.0, obswl=1.0 micron):"
" (az, alt, distance) in (deg, deg, kpc)\n"
)
def test_ops():
"""
Tests miscellaneous operations like `len`
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg, frame="icrs")
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg, frame="icrs")
sc_empty = SkyCoord([] * u.deg, [] * u.deg, frame="icrs")
assert sc.isscalar
assert not sc_arr.isscalar
assert not sc_empty.isscalar
with pytest.raises(TypeError):
len(sc)
assert len(sc_arr) == 2
assert len(sc_empty) == 0
assert bool(sc)
assert bool(sc_arr)
assert not bool(sc_empty)
assert sc_arr[0].isscalar
assert len(sc_arr[:1]) == 1
# A scalar shouldn't be indexable
with pytest.raises(TypeError):
sc[0:]
# but it should be possible to just get an item
sc_item = sc[()]
assert sc_item.shape == ()
# and to turn it into an array
sc_1d = sc[np.newaxis]
assert sc_1d.shape == (1,)
with pytest.raises(TypeError):
iter(sc)
assert not isiterable(sc)
assert isiterable(sc_arr)
assert isiterable(sc_empty)
it = iter(sc_arr)
assert next(it).dec == sc_arr[0].dec
assert next(it).dec == sc_arr[1].dec
with pytest.raises(StopIteration):
next(it)
def test_none_transform():
"""
Ensure that transforming from a SkyCoord with no frame provided works like
ICRS
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg)
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg)
sc2 = sc.transform_to(ICRS)
assert sc.ra == sc2.ra and sc.dec == sc2.dec
sc5 = sc.transform_to("fk5")
assert sc5.ra == sc2.transform_to("fk5").ra
sc_arr2 = sc_arr.transform_to(ICRS)
sc_arr5 = sc_arr.transform_to("fk5")
npt.assert_array_equal(sc_arr5.ra, sc_arr2.transform_to("fk5").ra)
def test_position_angle():
c1 = SkyCoord(0 * u.deg, 0 * u.deg)
c2 = SkyCoord(1 * u.deg, 0 * u.deg)
assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0 * u.deg)
c3 = SkyCoord(1 * u.deg, 0.1 * u.deg)
assert c1.position_angle(c3) < 90 * u.deg
c4 = SkyCoord(0 * u.deg, 1 * u.deg)
assert_allclose(c1.position_angle(c4), 0 * u.deg)
carr1 = SkyCoord(0 * u.deg, [0, 1, 2] * u.deg)
carr2 = SkyCoord([-1, -2, -3] * u.deg, [0.1, 1.1, 2.1] * u.deg)
res = carr1.position_angle(carr2)
assert res.shape == (3,)
assert np.all(res < 360 * u.degree)
assert np.all(res > 270 * u.degree)
cicrs = SkyCoord(0 * u.deg, 0 * u.deg, frame="icrs")
cfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5")
# because of the frame transform, it's just a *bit* more than 90 degrees
assert cicrs.position_angle(cfk5) > 90.0 * u.deg
assert cicrs.position_angle(cfk5) < 91.0 * u.deg
def test_position_angle_directly():
"""Regression check for #3800: position_angle should accept floats."""
from astropy.coordinates.angle_utilities import position_angle
result = position_angle(10.0, 20.0, 10.0, 20.0)
assert result.unit is u.radian
assert result.value == 0.0
def test_sep_pa_equivalence():
"""Regression check for bug in #5702.
PA and separation from object 1 to 2 should be consistent with those
from 2 to 1
"""
cfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5")
cfk5B1950 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5", equinox="B1950")
# test with both default and explicit equinox #5722 and #3106
sep_forward = cfk5.separation(cfk5B1950)
sep_backward = cfk5B1950.separation(cfk5)
assert sep_forward != 0 and sep_backward != 0
assert_allclose(sep_forward, sep_backward)
posang_forward = cfk5.position_angle(cfk5B1950)
posang_backward = cfk5B1950.position_angle(cfk5)
assert posang_forward != 0 and posang_backward != 0
assert 179 < (posang_forward - posang_backward).wrap_at(360 * u.deg).degree < 181
dcfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5", distance=1 * u.pc)
dcfk5B1950 = SkyCoord(
1 * u.deg, 0 * u.deg, frame="fk5", equinox="B1950", distance=1.0 * u.pc
)
sep3d_forward = dcfk5.separation_3d(dcfk5B1950)
sep3d_backward = dcfk5B1950.separation_3d(dcfk5)
assert sep3d_forward != 0 and sep3d_backward != 0
assert_allclose(sep3d_forward, sep3d_backward)
def test_directional_offset_by():
# Round-trip tests: where is sc2 from sc1?
# Use those offsets from sc1 and verify you get to sc2.
npoints = 7 # How many points when doing vectors of SkyCoords
for sc1 in [
SkyCoord(0 * u.deg, -90 * u.deg), # South pole
SkyCoord(0 * u.deg, 90 * u.deg), # North pole
SkyCoord(1 * u.deg, 2 * u.deg),
SkyCoord(
np.linspace(0, 359, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="fk4",
),
SkyCoord(
np.linspace(359, 0, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="icrs",
),
SkyCoord(
np.linspace(-3, 3, npoints),
np.linspace(-90, 90, npoints),
unit=(u.rad, u.deg),
frame="barycentricmeanecliptic",
),
]:
for sc2 in [
SkyCoord(5 * u.deg, 10 * u.deg),
SkyCoord(
np.linspace(0, 359, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="galactic",
),
]:
# Find the displacement from sc1 to sc2,
posang = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
# then do the offset from sc1 and verify that you are at sc2
sc2a = sc1.directional_offset_by(position_angle=posang, separation=sep)
assert np.max(np.abs(sc2.separation(sc2a).arcsec)) < 1e-3
# Specific test cases
# Go over the North pole a little way, and
# over the South pole a long way, to get to same spot
sc1 = SkyCoord(0 * u.deg, 89 * u.deg)
for posang, sep in [(0 * u.deg, 2 * u.deg), (180 * u.deg, 358 * u.deg)]:
sc2 = sc1.directional_offset_by(posang, sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 89])
# Go twice as far to ensure that dec is actually changing
# and that >360deg is supported
sc2 = sc1.directional_offset_by(posang, 2 * sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 87])
# Verify that a separation of 180 deg in any direction gets to the antipode
# and 360 deg returns to start
sc1 = SkyCoord(10 * u.deg, 47 * u.deg)
for posang in np.linspace(0, 377, npoints):
sc2 = sc1.directional_offset_by(posang, 180 * u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [190, -47])
sc2 = sc1.directional_offset_by(posang, 360 * u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [10, 47])
# Verify that a 90 degree posang, which means East
# corresponds to an increase in RA, by ~separation/cos(dec) and
# a slight convergence to equator
sc1 = SkyCoord(10 * u.deg, 60 * u.deg)
sc2 = sc1.directional_offset_by(90 * u.deg, 1.0 * u.deg)
assert 11.9 < sc2.ra.degree < 12.0
assert 59.9 < sc2.dec.degree < 60.0
def test_table_to_coord():
"""
Checks "end-to-end" use of `Table` with `SkyCoord` - the `Quantity`
    initializer is the intermediary that translates the table columns into
something coordinates understands.
(Regression test for #1762 )
"""
from astropy.table import Column, Table
t = Table()
t.add_column(Column(data=[1, 2, 3], name="ra", unit=u.deg))
t.add_column(Column(data=[4, 5, 6], name="dec", unit=u.deg))
c = SkyCoord(t["ra"], t["dec"])
assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg)
assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg)
def assert_quantities_allclose(coord, q1s, attrs):
"""
Compare two tuples of quantities. This assumes that the values in q1 are of
    order unity and uses atol=1e-13, rtol=0. It also asserts that the units of the
two quantities are the *same*, in order to check that the representation
output has the expected units.
"""
q2s = [getattr(coord, attr) for attr in attrs]
assert len(q1s) == len(q2s)
for q1, q2 in zip(q1s, q2s):
assert q1.shape == q2.shape
assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit)
# Sets of inputs corresponding to Galactic frame
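# Each tuple is (repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3),
# where cls2 is the class used to wrap the second component; the units are
# deliberately unusual to exercise unit handling.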
base_unit_attr_sets = [
("spherical", u.karcsec, u.karcsec, u.kpc, Latitude, "l", "b", "distance"),
("unitspherical", u.karcsec, u.karcsec, None, Latitude, "l", "b", None),
("physicsspherical", u.karcsec, u.karcsec, u.kpc, Angle, "phi", "theta", "r"),
("cartesian", u.km, u.km, u.km, u.Quantity, "u", "v", "w"),
("cylindrical", u.km, u.karcsec, u.km, Angle, "rho", "phi", "z"),
]
units_attr_sets = []
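# Expand each base set with the representation given both as a string and as a
# class, and with scalar, list, and ndarray component values.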
for base_unit_attr_set in base_unit_attr_sets:
repr_name = base_unit_attr_set[0]
for representation in (repr_name, REPRESENTATION_CLASSES[repr_name]):
for c1, c2, c3 in ((1, 2, 3), ([1], [2], [3])):
for arrayify in True, False:
if arrayify:
c1 = np.array(c1)
c2 = np.array(c2)
c3 = np.array(c3)
units_attr_sets.append(
base_unit_attr_set + (representation, c1, c2, c3)
)
units_attr_args = (
"repr_name",
"unit1",
"unit2",
"unit3",
"cls2",
"attr1",
"attr2",
"attr3",
"representation",
"c1",
"c2",
"c3",
)
@pytest.mark.parametrize(
units_attr_args, [x for x in units_attr_sets if x[0] != "unitspherical"]
)
def test_skycoord_three_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(
c1,
c2,
c3,
unit=(unit1, unit2, unit3),
representation_type=representation,
frame=Galactic,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
sc = SkyCoord(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
1000 * c3 * u.Unit(unit3 / 1000),
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr3: c3}
sc = SkyCoord(
c1,
c2,
unit=(unit1, unit2, unit3),
frame=Galactic,
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr1: c1, attr2: c2, attr3: c3}
sc = SkyCoord(
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
@pytest.mark.parametrize(
units_attr_args,
[x for x in units_attr_sets if x[0] in ("spherical", "unitspherical")],
)
def test_skycoord_spherical_two_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(
c1, c2, unit=(unit1, unit2), frame=Galactic, representation_type=representation
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
sc = SkyCoord(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
kwargs = {attr1: c1, attr2: c2}
sc = SkyCoord(
frame=Galactic,
unit=(unit1, unit2),
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
@pytest.mark.parametrize(
units_attr_args, [x for x in units_attr_sets if x[0] != "unitspherical"]
)
def test_galactic_three_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = Galactic(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
1000 * c3 * u.Unit(unit3 / 1000),
representation_type=representation,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr3: c3 * unit3}
sc = Galactic(c1 * unit1, c2 * unit2, representation_type=representation, **kwargs)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr1: c1 * unit1, attr2: c2 * unit2, attr3: c3 * unit3}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
@pytest.mark.parametrize(
units_attr_args,
[x for x in units_attr_sets if x[0] in ("spherical", "unitspherical")],
)
def test_galactic_spherical_two_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = Galactic(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
representation_type=representation,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
sc = Galactic(c1 * unit1, c2 * unit2, representation_type=representation)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
kwargs = {attr1: c1 * unit1, attr2: c2 * unit2}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
@pytest.mark.parametrize(
("repr_name", "unit1", "unit2", "unit3", "cls2", "attr1", "attr2", "attr3"),
[x for x in base_unit_attr_sets if x[0] != "unitspherical"],
)
def test_skycoord_coordinate_input(
repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3
):
c1, c2, c3 = 1, 2, 3
sc = SkyCoord(
[(c1, c2, c3)],
unit=(unit1, unit2, unit3),
representation_type=repr_name,
frame="galactic",
)
assert_quantities_allclose(
sc, ([c1] * unit1, [c2] * unit2, [c3] * unit3), (attr1, attr2, attr3)
)
c1, c2, c3 = 1 * unit1, 2 * unit2, 3 * unit3
sc = SkyCoord([(c1, c2, c3)], representation_type=repr_name, frame="galactic")
assert_quantities_allclose(
sc, ([1] * unit1, [2] * unit2, [3] * unit3), (attr1, attr2, attr3)
)
def test_skycoord_string_coordinate_input():
sc = SkyCoord("01 02 03 +02 03 04", unit="deg", representation_type="unitspherical")
assert_quantities_allclose(
sc,
(Angle("01:02:03", unit="deg"), Angle("02:03:04", unit="deg")),
("ra", "dec"),
)
sc = SkyCoord(
["01 02 03 +02 03 04"], unit="deg", representation_type="unitspherical"
)
assert_quantities_allclose(
sc,
(Angle(["01:02:03"], unit="deg"), Angle(["02:03:04"], unit="deg")),
("ra", "dec"),
)
def test_units():
sc = SkyCoord(1, 2, 3, unit="m", representation_type="cartesian") # All get meters
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
# All get u.m
sc = SkyCoord(1, 2 * u.km, 3, unit="m", representation_type="cartesian")
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit=u.m, representation_type="cartesian") # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit="m, km, pc", representation_type="cartesian")
assert_quantities_allclose(sc, (1 * u.m, 2 * u.km, 3 * u.pc), ("x", "y", "z"))
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, 3, unit=(u.m, u.m), representation_type="cartesian")
assert "should have matching physical types" in str(err.value)
    sc = SkyCoord(1, 2, 3, unit=(u.m, u.km, u.pc), representation_type="cartesian")
assert_quantities_allclose(sc, (1 * u.m, 2 * u.km, 3 * u.pc), ("x", "y", "z"))
@pytest.mark.xfail
def test_units_known_fail():
# should fail but doesn't => corner case oddity
with pytest.raises(u.UnitsError):
SkyCoord(1, 2, 3, unit=u.deg, representation_type="spherical")
def test_nodata_failure():
with pytest.raises(ValueError):
SkyCoord()
@pytest.mark.parametrize(("mode", "origin"), [("wcs", 0), ("all", 0), ("all", 1)])
def test_wcs_methods(mode, origin):
from astropy.utils.data import get_pkg_data_contents
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord
header = get_pkg_data_contents(
"../../wcs/tests/data/maps/1904-66_TAN.hdr", encoding="binary"
)
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89.0 * u.deg, frame="icrs")
xp, yp = ref.to_pixel(wcs, mode=mode, origin=origin)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to("icrs")
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# also try to round-trip with `from_pixel`
scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to(
"icrs"
)
assert_allclose(scnew.ra.degree, ref.ra.degree)
assert_allclose(scnew.dec.degree, ref.dec.degree)
# Also make sure the right type comes out
class SkyCoord2(SkyCoord):
pass
scnew2 = SkyCoord2.from_pixel(xp, yp, wcs, mode=mode, origin=origin)
assert scnew.__class__ is SkyCoord
assert scnew2.__class__ is SkyCoord2
def test_frame_attr_transform_inherit():
"""
Test that frame attributes get inherited as expected during transform.
Driven by #3106.
"""
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK5)
c2 = c.transform_to(FK4)
assert c2.equinox.value == "B1950.000"
assert c2.obstime.value == "B1950.000"
c2 = c.transform_to(FK4(equinox="J1975", obstime="J1980"))
assert c2.equinox.value == "J1975.000"
assert c2.obstime.value == "J1980.000"
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4)
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J2000.000"
assert c2.obstime is None
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, obstime="J1980")
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J2000.000"
assert c2.obstime.value == "J1980.000"
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, equinox="J1975", obstime="J1980")
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J1975.000"
assert c2.obstime.value == "J1980.000"
c2 = c.transform_to(FK5(equinox="J1990"))
assert c2.equinox.value == "J1990.000"
assert c2.obstime.value == "J1980.000"
# The work-around for #5722
c = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5")
c1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", equinox="B1950.000")
c2 = c1.transform_to(c)
assert not c2.is_equivalent_frame(c) # counterintuitive, but documented
assert c2.equinox.value == "B1950.000"
c3 = c1.transform_to(c, merge_attributes=False)
assert c3.equinox.value == "J2000.000"
assert c3.is_equivalent_frame(c)
def test_deepcopy():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
c2 = copy.copy(c1)
c3 = copy.deepcopy(c1)
c4 = SkyCoord(
[1, 2] * u.m,
[2, 3] * u.m,
[3, 4] * u.m,
representation_type="cartesian",
frame="fk5",
obstime="J1999.9",
equinox="J1988.8",
)
c5 = copy.deepcopy(c4)
assert np.all(c5.x == c4.x) # and y and z
assert c5.frame.name == c4.frame.name
assert c5.obstime == c4.obstime
assert c5.equinox == c4.equinox
assert c5.representation_type == c4.representation_type
def test_no_copy():
c1 = SkyCoord(np.arange(10.0) * u.hourangle, np.arange(20.0, 30.0) * u.deg)
c2 = SkyCoord(c1, copy=False)
# Note: c1.ra and c2.ra will *not* share memory, as these are recalculated
# to be in "preferred" units. See discussion in #4883.
assert np.may_share_memory(c1.data.lon, c2.data.lon)
c3 = SkyCoord(c1, copy=True)
assert not np.may_share_memory(c1.data.lon, c3.data.lon)
def test_immutable():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(AttributeError):
c1.ra = 3.0
c1.foo = 42
assert c1.foo == 42
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_search_around():
"""
Test the search_around_* methods
Here we don't actually test the values are right, just that the methods of
SkyCoord work. The accuracy tests are in ``test_matching.py``
"""
from astropy.utils import NumpyRNGContext
with NumpyRNGContext(987654321):
sc1 = SkyCoord(
np.random.rand(20) * 360.0 * u.degree,
(np.random.rand(20) * 180.0 - 90.0) * u.degree,
)
sc2 = SkyCoord(
np.random.rand(100) * 360.0 * u.degree,
(np.random.rand(100) * 180.0 - 90.0) * u.degree,
)
sc1ds = SkyCoord(ra=sc1.ra, dec=sc1.dec, distance=np.random.rand(20) * u.kpc)
sc2ds = SkyCoord(ra=sc2.ra, dec=sc2.dec, distance=np.random.rand(100) * u.kpc)
idx1_sky, idx2_sky, d2d_sky, d3d_sky = sc1.search_around_sky(sc2, 10 * u.deg)
idx1_3d, idx2_3d, d2d_3d, d3d_3d = sc1ds.search_around_3d(sc2ds, 250 * u.pc)
def test_init_with_frame_instance_keyword():
# Frame instance
c1 = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox="J2010"))
assert c1.equinox == Time("J2010")
# Frame instance with data (data gets ignored)
c2 = SkyCoord(
3 * u.deg, 4 * u.deg, frame=FK5(1.0 * u.deg, 2 * u.deg, equinox="J2010")
)
assert c2.equinox == Time("J2010")
assert allclose(c2.ra.degree, 3)
assert allclose(c2.dec.degree, 4)
# SkyCoord instance
c3 = SkyCoord(3 * u.deg, 4 * u.deg, frame=c1)
assert c3.equinox == Time("J2010")
# Check duplicate arguments
with pytest.raises(ValueError) as err:
c = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox="J2010"), equinox="J2001")
assert "Cannot specify frame attribute 'equinox'" in str(err.value)
def test_guess_from_table():
from astropy.table import Column, Table
from astropy.utils import NumpyRNGContext
tab = Table()
with NumpyRNGContext(987654321):
tab.add_column(Column(data=np.random.rand(10), unit="deg", name="RA[J2000]"))
tab.add_column(Column(data=np.random.rand(10), unit="deg", name="DEC[J2000]"))
sc = SkyCoord.guess_from_table(tab)
npt.assert_array_equal(sc.ra.deg, tab["RA[J2000]"])
npt.assert_array_equal(sc.dec.deg, tab["DEC[J2000]"])
# try without units in the table
tab["RA[J2000]"].unit = None
tab["DEC[J2000]"].unit = None
# should fail if not given explicitly
with pytest.raises(u.UnitsError):
sc2 = SkyCoord.guess_from_table(tab)
# but should work if provided
sc2 = SkyCoord.guess_from_table(tab, unit=u.deg)
npt.assert_array_equal(sc2.ra.deg, tab["RA[J2000]"])
npt.assert_array_equal(sc2.dec.deg, tab["DEC[J2000]"])
# should fail if two options are available - ambiguity bad!
tab.add_column(Column(data=np.random.rand(10), name="RA_J1900"))
with pytest.raises(ValueError) as excinfo:
SkyCoord.guess_from_table(tab, unit=u.deg)
assert "J1900" in excinfo.value.args[0] and "J2000" in excinfo.value.args[0]
tab.remove_column("RA_J1900")
tab["RA[J2000]"].unit = u.deg
tab["DEC[J2000]"].unit = u.deg
# but should succeed if the ambiguity can be broken b/c one of the matches
# is the name of a different component
tab.add_column(Column(data=np.random.rand(10) * u.mas / u.yr, name="pm_ra_cosdec"))
tab.add_column(Column(data=np.random.rand(10) * u.mas / u.yr, name="pm_dec"))
sc3 = SkyCoord.guess_from_table(tab)
assert u.allclose(sc3.ra, tab["RA[J2000]"])
assert u.allclose(sc3.dec, tab["DEC[J2000]"])
assert u.allclose(sc3.pm_ra_cosdec, tab["pm_ra_cosdec"])
assert u.allclose(sc3.pm_dec, tab["pm_dec"])
# should fail if stuff doesn't have proper units
tab["RA[J2000]"].unit = None
tab["DEC[J2000]"].unit = None
with pytest.raises(u.UnitTypeError, match="no unit was given."):
SkyCoord.guess_from_table(tab)
tab.remove_column("pm_ra_cosdec")
tab.remove_column("pm_dec")
# should also fail if user specifies something already in the table, but
# should succeed even if the user has to give one of the components
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tab, ra=tab["RA[J2000]"], unit=u.deg)
oldra = tab["RA[J2000]"]
tab.remove_column("RA[J2000]")
sc3 = SkyCoord.guess_from_table(tab, ra=oldra, unit=u.deg)
npt.assert_array_equal(sc3.ra.deg, oldra)
npt.assert_array_equal(sc3.dec.deg, tab["DEC[J2000]"])
# check a few non-ICRS/spherical systems
x, y, z = np.arange(3).reshape(3, 1) * u.pc
l, b = np.arange(2).reshape(2, 1) * u.deg
tabcart = Table([x, y, z], names=("x", "y", "z"))
tabgal = Table([b, l], names=("b", "l"))
sc_cart = SkyCoord.guess_from_table(tabcart, representation_type="cartesian")
npt.assert_array_equal(sc_cart.x, x)
npt.assert_array_equal(sc_cart.y, y)
npt.assert_array_equal(sc_cart.z, z)
sc_gal = SkyCoord.guess_from_table(tabgal, frame="galactic")
npt.assert_array_equal(sc_gal.l, l)
npt.assert_array_equal(sc_gal.b, b)
# also try some column names that *end* with the attribute name
tabgal["b"].name = "gal_b"
tabgal["l"].name = "gal_l"
SkyCoord.guess_from_table(tabgal, frame="galactic")
tabgal["gal_b"].name = "blob"
tabgal["gal_l"].name = "central"
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tabgal, frame="galactic")
def test_skycoord_list_creation():
"""
Test that SkyCoord can be created in a reasonable way with lists of SkyCoords
(regression for #2702)
"""
sc = SkyCoord(ra=[1, 2, 3] * u.deg, dec=[4, 5, 6] * u.deg)
sc0 = sc[0]
sc2 = sc[2]
scnew = SkyCoord([sc0, sc2])
assert np.all(scnew.ra == [1, 3] * u.deg)
assert np.all(scnew.dec == [4, 6] * u.deg)
# also check ranges
sc01 = sc[:2]
scnew2 = SkyCoord([sc01, sc2])
assert np.all(scnew2.ra == sc.ra)
assert np.all(scnew2.dec == sc.dec)
# now try with a mix of skycoord, frame, and repr objects
frobj = ICRS(2 * u.deg, 5 * u.deg)
reprobj = UnitSphericalRepresentation(3 * u.deg, 6 * u.deg)
scnew3 = SkyCoord([sc0, frobj, reprobj])
assert np.all(scnew3.ra == sc.ra)
assert np.all(scnew3.dec == sc.dec)
# should *fail* if different frame attributes or types are passed in
scfk5_j2000 = SkyCoord(1 * u.deg, 4 * u.deg, frame="fk5")
with pytest.raises(ValueError):
SkyCoord([sc0, scfk5_j2000])
scfk5_j2010 = SkyCoord(1 * u.deg, 4 * u.deg, frame="fk5", equinox="J2010")
with pytest.raises(ValueError):
SkyCoord([scfk5_j2000, scfk5_j2010])
# but they should inherit if they're all consistent
scfk5_2_j2010 = SkyCoord(2 * u.deg, 5 * u.deg, frame="fk5", equinox="J2010")
scfk5_3_j2010 = SkyCoord(3 * u.deg, 6 * u.deg, frame="fk5", equinox="J2010")
scnew4 = SkyCoord([scfk5_j2010, scfk5_2_j2010, scfk5_3_j2010])
assert np.all(scnew4.ra == sc.ra)
assert np.all(scnew4.dec == sc.dec)
assert scnew4.equinox == Time("J2010")
def test_nd_skycoord_to_string():
c = SkyCoord(np.ones((2, 2)), 1, unit=("deg", "deg"))
ts = c.to_string()
assert np.all(ts.shape == c.shape)
assert np.all(ts == "1 1")
def test_equiv_skycoord():
sci1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
sci2 = SkyCoord(1 * u.deg, 3 * u.deg, frame="icrs")
assert sci1.is_equivalent_frame(sci1)
assert sci1.is_equivalent_frame(sci2)
assert sci1.is_equivalent_frame(ICRS())
assert not sci1.is_equivalent_frame(FK5())
with pytest.raises(TypeError):
sci1.is_equivalent_frame(10)
scf1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5")
scf2 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", equinox="J2005")
    # obstime is *not* an FK5 attribute, but we still want scf1 and scf3 to
    # come out different because they're part of SkyCoord
scf3 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", obstime="J2005")
assert scf1.is_equivalent_frame(scf1)
assert not scf1.is_equivalent_frame(sci1)
assert scf1.is_equivalent_frame(FK5())
assert not scf1.is_equivalent_frame(scf2)
assert scf2.is_equivalent_frame(FK5(equinox="J2005"))
assert not scf3.is_equivalent_frame(scf1)
assert not scf3.is_equivalent_frame(FK5(equinox="J2005"))
def test_equiv_skycoord_with_extra_attrs():
"""Regression test for #10658."""
# GCRS has a CartesianRepresentationAttribute called obsgeoloc
gcrs = GCRS(
1 * u.deg, 2 * u.deg, obsgeoloc=CartesianRepresentation([1, 2, 3], unit=u.m)
)
# Create a SkyCoord where obsgeoloc tags along as an extra attribute
sc1 = SkyCoord(gcrs).transform_to(ICRS)
# Now create a SkyCoord with an equivalent frame but without the extra attribute
sc2 = SkyCoord(sc1.frame)
# The SkyCoords are therefore not equivalent, but check both directions
assert not sc1.is_equivalent_frame(sc2)
# This way around raised a TypeError which is fixed by #10658
assert not sc2.is_equivalent_frame(sc1)
def test_constellations():
# the actual test for accuracy is in test_funcs - this is just meant to make
# sure we get sensible answers
sc = SkyCoord(135 * u.deg, 65 * u.deg)
assert sc.get_constellation() == "Ursa Major"
assert sc.get_constellation(short_name=True) == "UMa"
scs = SkyCoord([135] * 2 * u.deg, [65] * 2 * u.deg)
npt.assert_equal(scs.get_constellation(), ["Ursa Major"] * 2)
npt.assert_equal(scs.get_constellation(short_name=True), ["UMa"] * 2)
@pytest.mark.remote_data
def test_constellations_with_nameresolve():
assert SkyCoord.from_name("And I").get_constellation(short_name=True) == "And"
# you'd think "And ..." should be in Andromeda. But you'd be wrong.
assert SkyCoord.from_name("And VI").get_constellation() == "Pegasus"
# maybe it's because And VI isn't really a galaxy?
assert SkyCoord.from_name("And XXII").get_constellation() == "Pisces"
assert SkyCoord.from_name("And XXX").get_constellation() == "Cassiopeia"
# ok maybe not
# ok, but at least some of the others do make sense...
assert (
SkyCoord.from_name("Coma Cluster").get_constellation(short_name=True) == "Com"
)
assert SkyCoord.from_name("Orion Nebula").get_constellation() == "Orion"
assert SkyCoord.from_name("Triangulum Galaxy").get_constellation() == "Triangulum"
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
sc = SkyCoord([1, 1] * u.deg, [2, 2] * u.deg)
sc.representation_type = "cartesian"
assert sc[0].representation_type is CartesianRepresentation
def test_spherical_offsets_to_api():
i00 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame="icrs")
fk5 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame="fk5")
with pytest.raises(ValueError):
# different frames should fail
i00.spherical_offsets_to(fk5)
i1deg = ICRS(1 * u.deg, 1 * u.deg)
dra, ddec = i00.spherical_offsets_to(i1deg)
assert_allclose(dra, 1 * u.deg)
assert_allclose(ddec, 1 * u.deg)
# make sure an abbreviated array-based version of the above also works
i00s = SkyCoord([0] * 4 * u.arcmin, [0] * 4 * u.arcmin, frame="icrs")
i01s = SkyCoord([0] * 4 * u.arcmin, np.arange(4) * u.arcmin, frame="icrs")
dra, ddec = i00s.spherical_offsets_to(i01s)
assert_allclose(dra, 0 * u.arcmin)
assert_allclose(ddec, np.arange(4) * u.arcmin)
@pytest.mark.parametrize("frame", ["icrs", "galactic"])
@pytest.mark.parametrize(
"comparison_data",
[
(0 * u.arcmin, 1 * u.arcmin),
(1 * u.arcmin, 0 * u.arcmin),
(1 * u.arcmin, 1 * u.arcmin),
],
)
def test_spherical_offsets_roundtrip(frame, comparison_data):
i00 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame=frame)
comparison = SkyCoord(*comparison_data, frame=frame)
dlon, dlat = i00.spherical_offsets_to(comparison)
assert_allclose(dlon, comparison.data.lon)
assert_allclose(dlat, comparison.data.lat)
i00_back = comparison.spherical_offsets_by(-dlon, -dlat)
# This reaches machine precision when only one component is changed, but for
# the third parametrized case (both lon and lat change), the transformation
# will have finite accuracy:
assert_allclose(i00_back.data.lon, i00.data.lon, atol=1e-10 * u.rad)
assert_allclose(i00_back.data.lat, i00.data.lat, atol=1e-10 * u.rad)
# Test roundtripping the other direction:
init_c = SkyCoord(40.0 * u.deg, 40.0 * u.deg, frame=frame)
new_c = init_c.spherical_offsets_by(3.534 * u.deg, 2.2134 * u.deg)
dlon, dlat = new_c.spherical_offsets_to(init_c)
back_c = new_c.spherical_offsets_by(dlon, dlat)
assert init_c.separation(back_c) < 1e-10 * u.deg
def test_frame_attr_changes():
"""
This tests the case where a frame is added with a new frame attribute after
a SkyCoord has been created. This is necessary because SkyCoords get the
attributes set at creation time, but the set of attributes can change as
frames are added or removed from the transform graph. This makes sure that
everything continues to work consistently.
"""
sc_before = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
assert "fakeattr" not in dir(sc_before)
class FakeFrame(BaseCoordinateFrame):
fakeattr = Attribute()
# doesn't matter what this does as long as it just puts the frame in the
# transform graph
transset = (ICRS, FakeFrame, lambda c, f: c)
frame_transform_graph.add_transform(*transset)
try:
assert "fakeattr" in dir(sc_before)
assert sc_before.fakeattr is None
sc_after1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
assert "fakeattr" in dir(sc_after1)
assert sc_after1.fakeattr is None
sc_after2 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs", fakeattr=1)
assert sc_after2.fakeattr == 1
finally:
frame_transform_graph.remove_transform(*transset)
assert "fakeattr" not in dir(sc_before)
assert "fakeattr" not in dir(sc_after1)
assert "fakeattr" not in dir(sc_after2)
def test_cache_clear_sc():
from astropy.coordinates import SkyCoord
i = SkyCoord(1 * u.deg, 2 * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
assert len(i.cache["representation"]) == 2
i.cache.clear()
assert len(i.cache["representation"]) == 0
def test_set_attribute_exceptions():
"""Ensure no attribute for any frame can be set directly.
Though it is fine if the current frame does not have it."""
sc = SkyCoord(1.0 * u.deg, 2.0 * u.deg, frame="fk5")
assert hasattr(sc.frame, "equinox")
with pytest.raises(AttributeError):
sc.equinox = "B1950"
assert sc.relative_humidity is None
sc.relative_humidity = 0.5
assert sc.relative_humidity == 0.5
assert not hasattr(sc.frame, "relative_humidity")
def test_extra_attributes():
"""Ensure any extra attributes are dealt with correctly.
Regression test against #5743.
"""
obstime_string = ["2017-01-01T00:00", "2017-01-01T00:10"]
obstime = Time(obstime_string)
sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=obstime_string)
assert not hasattr(sc.frame, "obstime")
assert type(sc.obstime) is Time
assert sc.obstime.shape == (2,)
assert np.all(sc.obstime == obstime)
# ensure equivalency still works for more than one obstime.
assert sc.is_equivalent_frame(sc)
sc_1 = sc[1]
assert sc_1.obstime == obstime[1]
# Transforming to FK4 should use sc.obstime.
sc_fk4 = sc.transform_to("fk4")
assert np.all(sc_fk4.frame.obstime == obstime)
    # And transforming back should not lose it.
sc2 = sc_fk4.transform_to("icrs")
assert not hasattr(sc2.frame, "obstime")
assert np.all(sc2.obstime == obstime)
    # Ensure obstime gets taken from the SkyCoord if passed in directly.
# (regression test for #5749).
sc3 = SkyCoord([0.0, 1.0], [2.0, 3.0], unit="deg", frame=sc)
assert np.all(sc3.obstime == obstime)
# Finally, check that we can delete such attributes.
del sc3.obstime
assert sc3.obstime is None
def test_apply_space_motion():
# use this 12 year period because it's a multiple of 4 to avoid the quirks
# of leap years while having 2 leap seconds in it
t1 = Time("2000-01-01T00:00")
t2 = Time("2012-01-01T00:00")
# Check a very simple case first:
frame = ICRS(
ra=10.0 * u.deg,
dec=0 * u.deg,
distance=10.0 * u.pc,
pm_ra_cosdec=0.1 * u.deg / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=0 * u.km / u.s,
)
# Cases that should work (just testing input for now):
c1 = SkyCoord(frame, obstime=t1, pressure=101 * u.kPa)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied1 = c1.apply_space_motion(new_obstime=t2)
applied2 = c1.apply_space_motion(dt=12 * u.year)
assert isinstance(applied1.frame, c1.frame.__class__)
assert isinstance(applied2.frame, c1.frame.__class__)
assert_allclose(applied1.ra, applied2.ra)
assert_allclose(applied1.pm_ra_cosdec, applied2.pm_ra_cosdec)
assert_allclose(applied1.dec, applied2.dec)
assert_allclose(applied1.distance, applied2.distance)
# ensure any frame attributes that were there before get passed through
assert applied1.pressure == c1.pressure
    # there were 2 leap seconds between 2000 and 2012, so the difference in
# the two forms of time evolution should be ~2 sec
adt = np.abs(applied2.obstime - applied1.obstime)
assert 1.9 * u.second < adt.to(u.second) < 2.1 * u.second
c2 = SkyCoord(frame)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied3 = c2.apply_space_motion(dt=6 * u.year)
assert isinstance(applied3.frame, c1.frame.__class__)
assert applied3.obstime is None
# this should *not* be .6 deg due to space-motion on a sphere, but it
# should be fairly close
assert 0.5 * u.deg < applied3.ra - c1.ra < 0.7 * u.deg
# the two cases should only match somewhat due to it being space motion, but
# they should be at least this close
assert quantity_allclose(
applied1.ra - c1.ra, (applied3.ra - c1.ra) * 2, atol=1e-3 * u.deg
)
# but *not* this close
assert not quantity_allclose(
applied1.ra - c1.ra, (applied3.ra - c1.ra) * 2, atol=1e-4 * u.deg
)
with pytest.raises(ValueError):
c2.apply_space_motion(new_obstime=t2)
def test_custom_frame_skycoord():
# also regression check for the case from #7069
class BlahBleeBlopFrame(BaseCoordinateFrame):
default_representation = SphericalRepresentation
# without a differential, SkyCoord creation fails
# default_differential = SphericalDifferential
_frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "lon", "recommended"),
RepresentationMapping("lat", "lat", "recommended"),
RepresentationMapping("distance", "radius", "recommended"),
]
}
SkyCoord(lat=1 * u.deg, lon=2 * u.deg, frame=BlahBleeBlopFrame)
def test_user_friendly_pm_error():
"""
This checks that a more user-friendly error message is raised for the user
if they pass, e.g., pm_ra instead of pm_ra_cosdec
"""
with pytest.raises(ValueError) as e:
SkyCoord(
ra=150 * u.deg,
dec=-11 * u.deg,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
)
assert "pm_ra_cosdec" in str(e.value)
with pytest.raises(ValueError) as e:
SkyCoord(
l=150 * u.deg,
b=-11 * u.deg,
pm_l=100 * u.mas / u.yr,
pm_b=10 * u.mas / u.yr,
frame="galactic",
)
assert "pm_l_cosb" in str(e.value)
# The special error should not turn on here:
with pytest.raises(ValueError) as e:
SkyCoord(
x=1 * u.pc,
y=2 * u.pc,
z=3 * u.pc,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
representation_type="cartesian",
)
assert "pm_ra_cosdec" not in str(e.value)
def test_contained_by():
"""
    Test SkyCoord.contained_by(wcs, image)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
"""
test_wcs = WCS(fits.Header.fromstring(header.strip(), "\n"))
assert SkyCoord(254, 2, unit="deg").contained_by(test_wcs)
assert not SkyCoord(240, 2, unit="deg").contained_by(test_wcs)
img = np.zeros((2136, 2078))
assert SkyCoord(250, 2, unit="deg").contained_by(test_wcs, img)
assert not SkyCoord(240, 2, unit="deg").contained_by(test_wcs, img)
ra = np.array([254.2, 254.1])
dec = np.array([2, 12.1])
coords = SkyCoord(ra, dec, unit="deg")
assert np.all(test_wcs.footprint_contains(coords) == np.array([True, False]))
def test_none_differential_type():
"""
This is a regression test for #8021
"""
from astropy.coordinates import BaseCoordinateFrame
class MockHeliographicStonyhurst(BaseCoordinateFrame):
default_representation = SphericalRepresentation
frame_specific_representation_info = {
SphericalRepresentation: [
RepresentationMapping(
reprname="lon", framename="lon", defaultunit=u.deg
),
RepresentationMapping(
reprname="lat", framename="lat", defaultunit=u.deg
),
RepresentationMapping(
reprname="distance", framename="radius", defaultunit=None
),
]
}
fr = MockHeliographicStonyhurst(lon=1 * u.deg, lat=2 * u.deg, radius=10 * u.au)
SkyCoord(0 * u.deg, fr.lat, fr.radius, frame=fr) # this was the failure
def test_multiple_aliases():
# Define a frame with multiple aliases
class MultipleAliasesFrame(BaseCoordinateFrame):
name = ["alias_1", "alias_2"]
default_representation = SphericalRepresentation
# Register a transform, which adds the aliases to the transform graph
tfun = lambda c, f: f.__class__(lon=c.lon, lat=c.lat)
ftrans = FunctionTransform(
tfun,
MultipleAliasesFrame,
MultipleAliasesFrame,
register_graph=frame_transform_graph,
)
coord = SkyCoord(lon=1 * u.deg, lat=2 * u.deg, frame=MultipleAliasesFrame)
# Test attribute-style access returns self (not a copy)
assert coord.alias_1 is coord
assert coord.alias_2 is coord
# Test for aliases in __dir__()
assert "alias_1" in coord.__dir__()
assert "alias_2" in coord.__dir__()
# Test transform_to() calls
assert isinstance(coord.transform_to("alias_1").frame, MultipleAliasesFrame)
assert isinstance(coord.transform_to("alias_2").frame, MultipleAliasesFrame)
ftrans.unregister(frame_transform_graph)
@pytest.mark.parametrize(
"kwargs, error_message",
[
(
{"ra": 1, "dec": 1, "distance": 1 * u.pc, "unit": "deg"},
r"Unit 'deg' \(angle\) could not be applied to 'distance'. ",
),
(
{
"rho": 1 * u.m,
"phi": 1,
"z": 1 * u.m,
"unit": "deg",
"representation_type": "cylindrical",
},
r"Unit 'deg' \(angle\) could not be applied to 'rho'. ",
),
],
)
def test_passing_inconsistent_coordinates_and_units_raises_helpful_error(
kwargs, error_message
):
# https://github.com/astropy/astropy/issues/10725
with pytest.raises(ValueError, match=error_message):
SkyCoord(**kwargs)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_match_to_catalog_3d_and_sky():
# Test for issue #5857. See PR #11449
cfk5_default = SkyCoord(
[1, 2, 3, 4] * u.degree,
[0, 0, 0, 0] * u.degree,
distance=[1, 1, 1.5, 1] * u.kpc,
frame="fk5",
)
cfk5_J1950 = cfk5_default.transform_to(FK5(equinox="J1950"))
idx, angle, quantity = cfk5_J1950.match_to_catalog_3d(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0 * u.deg, atol=1e-14 * u.deg, rtol=0)
assert_allclose(quantity, 0 * u.kpc, atol=1e-14 * u.kpc, rtol=0)
idx, angle, distance = cfk5_J1950.match_to_catalog_sky(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0 * u.deg, atol=1e-14 * u.deg, rtol=0)
assert_allclose(distance, 0 * u.kpc, atol=1e-14 * u.kpc, rtol=0)
def test_subclass_property_exception_error():
"""Regression test for gh-8340.
Non-existing attribute access inside a property should give attribute
error for the attribute, not for the property.
"""
class custom_coord(SkyCoord):
@property
def prop(self):
return self.random_attr
c = custom_coord("00h42m30s", "+41d12m00s", frame="icrs")
with pytest.raises(AttributeError, match="random_attr"):
# Before this matched "prop" rather than "random_attr"
c.prop
|
ec457859582587b41041d0497056e50add23f59a9f420971c7e1447b2428ae76 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import io
import itertools
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps
import numpy as np
from packaging.version import Version
from astropy.utils import data
from astropy.utils.exceptions import AstropyUserWarning
path_like = (str, bytes, os.PathLike)
cmp = lambda a, b: (a > b) - (a < b)
all_integer_types = (int, np.integer)
class NotifierMixin:
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener:
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with suppress(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = f"_update_{notification}"
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
Exclude listeners when saving the listener's state, since they may be
ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super().__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state["_listeners"] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Example:
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter("__name__")):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.current_thread()
single_thread = (
threading.active_count() == 1 and curr_thread.name == "MainThread"
)
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn(
"KeyboardInterrupt ignored until {} is complete!".format(
func.__name__
),
AstropyUserWarning,
)
self.sigint_received = True
sigint_handler = SigintHandler()
        # Define a new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
"""
a, b = itertools.tee(iterable)
for _ in b:
# Just a little trick to advance b without having to catch
# StopIter if b happens to be empty
break
return zip(a, b)
def encode_ascii(s):
if isinstance(s, str):
return s.encode("ascii")
elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.str_):
ns = np.char.encode(s, "ascii").view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
ns = ns.astype((np.bytes_, s.dtype.itemsize / 4))
return ns
elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.bytes_):
raise TypeError("string operation on non-string array")
return s
def decode_ascii(s):
if isinstance(s, bytes):
try:
return s.decode("ascii")
except UnicodeDecodeError:
warnings.warn(
"non-ASCII characters are present in the FITS "
'file header and have been replaced by "?" '
"characters",
AstropyUserWarning,
)
s = s.decode("ascii", errors="replace")
return s.replace("\ufffd", "?")
elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.bytes_):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
# Numpy apparently also has a bug that if a string array is
# empty calling np.char.decode on it returns an empty float64
# array : https://github.com/numpy/numpy/issues/13156
dt = s.dtype.str.replace("S", "U")
ns = np.array([], dtype=dt).view(type(s))
else:
ns = np.char.decode(s, "ascii").view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((np.str_, s.dtype.itemsize))
return ns
elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.str_):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError("string operation on non-string array")
return s
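# Illustrative usage of encode_ascii/decode_ascii (values chosen only for
# demonstration): plain str/bytes round-trip directly, and numpy string
# arrays are converted elementwise, e.g.
#     encode_ascii("SIMPLE")      # -> b'SIMPLE'
#     decode_ascii(b"SIMPLE")     # -> 'SIMPLE'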
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, "readable"):
return f.readable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "read"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "r+"):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, "writable"):
return f.writable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "write"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "wa+"):
return False
    # Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, "buffer"):
return isfile(f.buffer)
elif hasattr(f, "raw"):
return isfile(f.raw)
return False
def fileobj_name(f):
"""
Returns the 'name' of file-like object *f*, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, (str, bytes)):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, "name"):
return f.name
elif hasattr(f, "filename"):
return f.filename
elif hasattr(f, "__class__"):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if *f* is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, path_like):
return True
if hasattr(f, "closed"):
return f.closed
elif hasattr(f, "fileobj") and hasattr(f.fileobj, "closed"):
return f.fileobj.closed
elif hasattr(f, "fp") and hasattr(f.fp, "closed"):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, "fileobj") and hasattr(f.fileobj, "mode"):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, "fileobj_mode"):
return f.fileobj_mode
# PIL-Image -like investigate the fp (filebuffer)
elif hasattr(f, "fp") and hasattr(f.fp, "mode"):
fileobj = f.fp
# FILEIO -like (normal open(...)), keep as is.
elif hasattr(f, "mode"):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return "rb"
elif mode == gzip.WRITE:
return "wb"
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if "+" in mode:
mode = mode.replace("+", "")
mode += "+"
return mode
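# Illustrative behaviour of fileobj_mode (assumed typical cases): a regular
# file opened with mode "r+b" is reported as "rb+" (the "+" is moved to the
# end), and a gzip.GzipFile opened for reading is reported as "rb" via its
# underlying file object.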
def fileobj_is_binary(f):
"""
    Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, "binary"):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return "b" in mode
else:
return True
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers.
"""
paragraphs = text.split("\n\n")
def maybe_fill(t):
if all(len(l) < width for l in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return "\n\n".join(maybe_fill(p) for p in paragraphs)
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if sys.platform == "darwin" and Version(platform.mac_ver()[0]) < Version(
"10.9"
):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024**3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
        # copy is needed because the array constructed on the read-only bytes
        # buffer is itself read-only
array = array.copy()
return array
_OSX_WRITE_LIMIT = (2**32) - 1
_WIN_WRITE_LIMIT = (2**31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : ndarray
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
try:
seekable = outfile.seekable()
except AttributeError:
seekable = False
if isfile(outfile) and seekable:
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (
sys.platform == "darwin"
and arr.nbytes >= _OSX_WRITE_LIMIT + 1
and arr.nbytes % 4096 == 0
):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith("win"):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx : idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
        # it does not have the buffer interface, a TypeError should be raised
# in which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, "nditer"):
# nditer version for non-contiguous arrays
for item in np.nditer(arr, order="C"):
fileobj.write(item.tobytes())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if (sys.byteorder == "little" and byteorder == ">") or (
sys.byteorder == "big" and byteorder == "<"
):
for item in arr.flat:
fileobj.write(item.byteswap().tobytes())
else:
for item in arr.flat:
fileobj.write(item.tobytes())
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
elif not binmode and not isinstance(f, str):
s = decode_ascii(s)
f.write(s)
def _convert_array(array, dtype):
"""
    Converts an array to a new dtype--if the itemsize of the new dtype is
    the same as the old dtype's and the two dtypes are not both numeric, a
    view is returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif array.dtype.itemsize == dtype.itemsize and not (
np.issubdtype(array.dtype, np.number) and np.issubdtype(dtype, np.number)
):
# Includes a special case when both dtypes are at least numeric to
# account for old Trac ticket 218 (now inaccessible).
return array.view(dtype)
else:
return array.astype(dtype)
def _pseudo_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
# special case for int8
if dtype.kind == "i" and dtype.itemsize == 1:
return -128
assert dtype.kind == "u"
return 1 << (dtype.itemsize * 8 - 1)
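# Illustrative values for _pseudo_zero (chosen for demonstration):
#     _pseudo_zero(np.dtype("uint16"))   # -> 32768
#     _pseudo_zero(np.dtype("int8"))     # -> -128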
def _is_pseudo_integer(dtype):
return (dtype.kind == "u" and dtype.itemsize >= 2) or (
dtype.kind == "i" and dtype.itemsize == 1
)
def _is_int(val):
return isinstance(val, all_integer_types)
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(s, width):
"""
    Split a long string into parts where each part is no longer than ``width``
    and no word is cut into two pieces. But if there are any single words
    which are longer than ``width``, then they will be split in the middle of
    the word.
"""
words = []
slen = len(s)
# appending one blank at the end always ensures that the "last" blank
# is beyond the end of the string
arr = np.frombuffer(s.encode("utf8") + b" ", dtype="S1")
# locations of the blanks
blank_loc = np.nonzero(arr == b" ")[0]
offset = 0
xoffset = 0
while True:
try:
loc = np.nonzero(blank_loc >= width + offset)[0][0]
except IndexError:
loc = len(blank_loc)
if loc > 0:
offset = blank_loc[loc - 1] + 1
else:
offset = -1
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = min(xoffset + width, slen)
# collect the pieces in a list
words.append(s[xoffset:offset])
if offset >= slen:
break
xoffset = offset
return words
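# Illustrative example for _words_group (hypothetical input); pieces keep
# their trailing blanks and words are only split if longer than ``width``:
#     _words_group("The quick brown fox jumps", 10)
#     # -> ['The quick ', 'brown fox ', 'jumps']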
def _tmp_name(input):
"""
    Create a temporary file name which should not already exist. Use the
    directory of the input file as the directory for the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
    If the array has an mmap.mmap at the base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, "base") and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
try:
yield
except OSError as exc:
error_message = ""
if not isinstance(hdulist, list):
hdulist = [
hdulist,
]
if dirname is None:
dirname = os.path.dirname(hdulist._file.name)
if os.path.isdir(dirname):
free_space = data.get_free_space_in_dir(dirname)
hdulist_size = sum(hdu.size for hdu in hdulist)
if free_space < hdulist_size:
error_message = (
"Not enough space on disk: requested {}, available {}. ".format(
hdulist_size, free_space
)
)
for hdu in hdulist:
hdu._close()
raise OSError(error_message + str(exc))
def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
"""
try:
# The _str_to_num method converts the value to string/float
# so we need to perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default
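# Illustrative examples for _extract_number (values chosen for demonstration):
#     _extract_number("2.0", 1)    # -> 2
#     _extract_number("junk", 1)   # -> 1 (the default)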
def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(f"io/fits/tests/data/{filename}", "astropy")
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
    # The following implementation converts the strings to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in "SU":
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == "S" else 4
dt_int = f"({dt.itemsize // bpc},){dt.byteorder}u{bpc}"
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j : j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
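# Illustrative use of _rstrip_inplace (hypothetical array): trailing blanks
# are removed without making a copy, e.g.
#     a = np.array(["ab  ", "c   "], dtype="S4")
#     _rstrip_inplace(a)
#     # a[0] is now b'ab' and a[1] is b'c'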
def _is_dask_array(data):
"""Check whether data is a dask array.
We avoid importing dask unless it is likely it is a dask array,
so that non-dask code is not slowed down.
"""
if not hasattr(data, "compute"):
return False
try:
from dask.array import Array
except ImportError:
# If we cannot import dask, surely this cannot be a
# dask array!
return False
else:
return isinstance(data, Array)
|
f7abd3ff0d82f9ef013052e4b69823604568d6182b8b3d860b86962cb1e83332 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Classes to read AAS MRT table format
Ref: https://journals.aas.org/mrt-standards
:Copyright: Smithsonian Astrophysical Observatory (2021)
:Author: Tom Aldcroft ([email protected]), \
Suyog Garg ([email protected])
"""
import re
import warnings
from io import StringIO
from math import ceil, floor
from string import Template
from textwrap import wrap
import numpy as np
from astropy import units as u
from astropy.table import Column, MaskedColumn, Table
from . import cds, core, fixedwidth
MAX_SIZE_README_LINE = 80
MAX_COL_INTLIMIT = 100000
__doctest_skip__ = ["*"]
BYTE_BY_BYTE_TEMPLATE = [
"Byte-by-byte Description of file: $file",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
"$bytebybyte",
"--------------------------------------------------------------------------------",
]
MRT_TEMPLATE = [
"Title:",
"Authors:",
"Table:",
"================================================================================",
"$bytebybyte",
"Notes:",
"--------------------------------------------------------------------------------",
]
class MrtSplitter(fixedwidth.FixedWidthSplitter):
"""
Contains the join function to left align the MRT columns
when writing to a file.
"""
def join(self, vals, widths):
vals = [val + " " * (width - len(val)) for val, width in zip(vals, widths)]
return self.delimiter.join(vals)
class MrtHeader(cds.CdsHeader):
_subfmt = "MRT"
def _split_float_format(self, value):
"""
        Splits a Float string into different parts to find the number
        of digits after the decimal point and check if the value is in Scientific
        notation.
Parameters
----------
value : str
String containing the float value to split.
Returns
-------
fmt: (int, int, int, bool, bool)
List of values describing the Float string.
            (size, ent, dec, sign, exp)
size, length of the given string.
ent, number of digits before decimal point.
dec, number of digits after decimal point.
            sign, whether or not the given value is signed.
exp, is value in Scientific notation?
"""
regfloat = re.compile(
r"""(?P<sign> [+-]*)
(?P<ent> [^eE.]+)
(?P<deciPt> [.]*)
(?P<decimals> [0-9]*)
(?P<exp> [eE]*-*)[0-9]*""",
re.VERBOSE,
)
mo = regfloat.match(value)
if mo is None:
raise Exception(f"{value} is not a float number")
return (
len(value),
len(mo.group("ent")),
len(mo.group("decimals")),
mo.group("sign") != "",
mo.group("exp") != "",
)
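    # Illustrative result of _split_float_format for a signed plain float
    # string (value chosen for demonstration):
    #     self._split_float_format("-12.345")   # -> (7, 2, 3, True, False)
    # i.e. (size, ent, dec, sign, exp).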
def _set_column_val_limits(self, col):
"""
Sets the ``col.min`` and ``col.max`` column attributes,
taking into account columns with Null values.
"""
col.max = max(col)
col.min = min(col)
if col.max is np.ma.core.MaskedConstant:
col.max = None
if col.min is np.ma.core.MaskedConstant:
col.min = None
def column_float_formatter(self, col):
"""
String formatter function for a column containing Float values.
Checks if the values in the given column are in Scientific notation,
by splitting the value string. It is assumed that the column either has
float values or Scientific notation.
A ``col.formatted_width`` attribute is added to the column. It is not added
if such an attribute is already present, say when the ``formats`` argument
is passed to the writer. A properly formatted format string is also added as
the ``col.format`` attribute.
Parameters
----------
col : A ``Table.Column`` object.
"""
# maxsize: maximum length of string containing the float value.
        # maxent: maximum number of digits before the decimal point.
        # maxdec: maximum number of digits after the decimal point.
# maxprec: maximum precision of the column values, sum of maxent and maxdec.
maxsize, maxprec, maxent, maxdec = 1, 0, 1, 0
sign = False
fformat = "F"
# Find maximum sized value in the col
for val in col.str_vals:
# Skip null values
if val is None or val == "":
continue
# Find format of the Float string
fmt = self._split_float_format(val)
# If value is in Scientific notation
if fmt[4] is True:
# if the previous column value was in normal Float format
# set maxsize, maxprec and maxdec to default.
if fformat == "F":
maxsize, maxprec, maxdec = 1, 0, 0
# Designate the column to be in Scientific notation.
fformat = "E"
else:
# Move to next column value if
# current value is not in Scientific notation
# but the column is designated as such because
# one of the previous values was.
if fformat == "E":
continue
if maxsize < fmt[0]:
maxsize = fmt[0]
if maxent < fmt[1]:
maxent = fmt[1]
if maxdec < fmt[2]:
maxdec = fmt[2]
if fmt[3]:
sign = True
if maxprec < fmt[1] + fmt[2]:
maxprec = fmt[1] + fmt[2]
if fformat == "E":
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = maxsize
if sign:
col.formatted_width += 1
# Number of digits after decimal is replaced by the precision
# for values in Scientific notation, when writing that Format.
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxprec)
col.format = str(col.formatted_width) + "." + str(maxdec) + "e"
else:
lead = ""
if (
getattr(col, "formatted_width", None) is None
): # If ``formats`` not passed.
col.formatted_width = maxent + maxdec + 1
if sign:
col.formatted_width += 1
elif col.format.startswith("0"):
# Keep leading zero, if already set in format - primarily for `seconds` columns
# in coordinates; may need extra case if this is to be also supported with `sign`.
lead = "0"
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxdec)
col.format = lead + col.fortran_format[1:] + "f"
def write_byte_by_byte(self):
"""
Writes the Byte-By-Byte description of the table.
Columns that are `astropy.coordinates.SkyCoord` or `astropy.time.TimeSeries`
objects or columns with values that are such objects are recognized as such,
        and some predefined labels and descriptions are used for them.
See the Vizier MRT Standard documentation in the link below for more details
on these. An example Byte-By-Byte table is shown here.
See: http://vizier.u-strasbg.fr/doc/catstd-3.1.htx
Example::
--------------------------------------------------------------------------------
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 8 A8 --- names Description of names
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
25-31 E7.1 --- s [-9e+34/2.0] Description of s
33-35 I3 --- i [-30/67] Description of i
37-39 F3.1 --- sameF [5.0/5.0] Description of sameF
41-42 I2 --- sameI [20] Description of sameI
44-45 I2 h RAh Right Ascension (hour)
47-48 I2 min RAm Right Ascension (minute)
50-67 F18.15 s RAs Right Ascension (second)
69 A1 --- DE- Sign of Declination
70-71 I2 deg DEd Declination (degree)
73-74 I2 arcmin DEm Declination (arcmin)
76-91 F16.13 arcsec DEs Declination (arcsec)
--------------------------------------------------------------------------------
"""
# Get column widths
vals_list = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
for i, col in enumerate(self.cols):
col.width = max(len(vals[i]) for vals in vals_list)
if self.start_line is not None:
col.width = max(col.width, len(col.info.name))
widths = [col.width for col in self.cols]
startb = 1 # Byte count starts at 1.
# Set default width of the Bytes count column of the Byte-By-Byte table.
# This ``byte_count_width`` value helps align byte counts with respect
# to the hyphen using a format string.
byte_count_width = len(str(sum(widths) + len(self.cols) - 1))
# Format string for Start Byte and End Byte
singlebfmt = "{:" + str(byte_count_width) + "d}"
fmtb = singlebfmt + "-" + singlebfmt
# Add trailing single whitespaces to Bytes column for better visibility.
singlebfmt += " "
fmtb += " "
# Set default width of Label and Description Byte-By-Byte columns.
max_label_width, max_descrip_size = 7, 16
bbb = Table(
names=["Bytes", "Format", "Units", "Label", "Explanations"], dtype=[str] * 5
)
# Iterate over the columns to write Byte-By-Byte rows.
for i, col in enumerate(self.cols):
# Check if column is MaskedColumn
col.has_null = isinstance(col, MaskedColumn)
if col.format is not None:
col.formatted_width = max(len(sval) for sval in col.str_vals)
# Set MRTColumn type, size and format.
if np.issubdtype(col.dtype, np.integer):
# Integer formatter
self._set_column_val_limits(col)
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = max(len(str(col.max)), len(str(col.min)))
col.fortran_format = "I" + str(col.formatted_width)
if col.format is None:
col.format = ">" + col.fortran_format[1:]
elif np.issubdtype(col.dtype, np.dtype(float).type):
# Float formatter
self._set_column_val_limits(col)
self.column_float_formatter(col)
else:
# String formatter, ``np.issubdtype(col.dtype, str)`` is ``True``.
dtype = col.dtype.str
if col.has_null:
mcol = col
mcol.fill_value = ""
coltmp = Column(mcol.filled(), dtype=str)
dtype = coltmp.dtype.str
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = int(re.search(r"(\d+)$", dtype).group(1))
col.fortran_format = "A" + str(col.formatted_width)
col.format = str(col.formatted_width) + "s"
endb = col.formatted_width + startb - 1
# ``mixin`` columns converted to string valued columns will not have a name
            # attribute. In those cases, an ``Unknown`` column label is used, indicating that
# such columns can be better formatted with some manipulation before calling
# the MRT writer.
if col.name is None:
col.name = "Unknown"
# Set column description.
if col.description is not None:
description = col.description
else:
description = "Description of " + col.name
# Set null flag in column description
nullflag = ""
if col.has_null:
nullflag = "?"
# Set column unit
if col.unit is not None:
col_unit = col.unit.to_string("cds")
elif col.name.lower().find("magnitude") > -1:
# ``col.unit`` can still be ``None``, if the unit of column values
# is ``Magnitude``, because ``astropy.units.Magnitude`` is actually a class.
# Unlike other units which are instances of ``astropy.units.Unit``,
# application of the ``Magnitude`` unit calculates the logarithm
# of the values. Thus, the only way to check for if the column values
# have ``Magnitude`` unit is to check the column name.
col_unit = "mag"
else:
col_unit = "---"
# Add col limit values to col description
lim_vals = ""
if (
col.min
and col.max
and not any(
x in col.name for x in ["RA", "DE", "LON", "LAT", "PLN", "PLT"]
)
):
# No col limit values for coordinate columns.
if col.fortran_format[0] == "I":
if (
abs(col.min) < MAX_COL_INTLIMIT
and abs(col.max) < MAX_COL_INTLIMIT
):
if col.min == col.max:
lim_vals = f"[{col.min}]"
else:
lim_vals = f"[{col.min}/{col.max}]"
elif col.fortran_format[0] in ("E", "F"):
lim_vals = (
f"[{floor(col.min * 100) / 100.}/{ceil(col.max * 100) / 100.}]"
)
if lim_vals != "" or nullflag != "":
description = f"{lim_vals}{nullflag} {description}"
# Find the maximum label and description column widths.
if len(col.name) > max_label_width:
max_label_width = len(col.name)
if len(description) > max_descrip_size:
max_descrip_size = len(description)
# Add a row for the Sign of Declination in the bbb table
if col.name == "DEd":
bbb.add_row(
[
singlebfmt.format(startb),
"A1",
"---",
"DE-",
"Sign of Declination",
]
)
col.fortran_format = "I2"
startb += 1
# Add Byte-By-Byte row to bbb table
bbb.add_row(
[
singlebfmt.format(startb)
if startb == endb
else fmtb.format(startb, endb),
"" if col.fortran_format is None else col.fortran_format,
col_unit,
"" if col.name is None else col.name,
description,
]
)
startb = endb + 2
# Properly format bbb columns
bbblines = StringIO()
bbb.write(
bbblines,
format="ascii.fixed_width_no_header",
delimiter=" ",
bookend=False,
delimiter_pad=None,
formats={
"Format": "<6s",
"Units": "<6s",
"Label": "<" + str(max_label_width) + "s",
"Explanations": "" + str(max_descrip_size) + "s",
},
)
# Get formatted bbb lines
bbblines = bbblines.getvalue().splitlines()
# ``nsplit`` is the number of whitespaces to prefix to long description
# lines in order to wrap them. It is the sum of the widths of the
# previous 4 columns plus the number of single spacing between them.
# The hyphen in the Bytes column is also counted.
nsplit = byte_count_width * 2 + 1 + 12 + max_label_width + 4
# Wrap line if it is too long
buff = ""
for newline in bbblines:
if len(newline) > MAX_SIZE_README_LINE:
buff += ("\n").join(
wrap(
newline,
subsequent_indent=" " * nsplit,
width=MAX_SIZE_README_LINE,
)
)
buff += "\n"
else:
buff += newline + "\n"
# Last value of ``endb`` is the sum of column widths after formatting.
self.linewidth = endb
# Remove the last extra newline character from Byte-By-Byte.
buff = buff[:-1]
return buff
def write(self, lines):
"""
Writes the Header of the MRT table, aka ReadMe, which
also contains the Byte-By-Byte description of the table.
"""
from astropy.coordinates import SkyCoord
# Recognised ``SkyCoord.name`` forms with their default column names (helio* require SunPy).
coord_systems = {
"galactic": ("GLAT", "GLON", "b", "l"),
"ecliptic": ("ELAT", "ELON", "lat", "lon"), # 'geocentric*ecliptic'
"heliographic": ("HLAT", "HLON", "lat", "lon"), # '_carrington|stonyhurst'
"helioprojective": ("HPLT", "HPLN", "Ty", "Tx"),
}
eqtnames = ["RAh", "RAm", "RAs", "DEd", "DEm", "DEs"]
# list to store indices of columns that are modified.
to_pop = []
# For columns that are instances of ``SkyCoord`` and other ``mixin`` columns
# or whose values are objects of these classes.
for i, col in enumerate(self.cols):
# If col is a ``Column`` object but its values are ``SkyCoord`` objects,
# convert the whole column to ``SkyCoord`` object, which helps in applying
# SkyCoord methods directly.
if not isinstance(col, SkyCoord) and isinstance(col[0], SkyCoord):
try:
col = SkyCoord(col)
except (ValueError, TypeError):
# If only the first value of the column is a ``SkyCoord`` object,
# the column cannot be converted to a ``SkyCoord`` object.
# These columns are converted to ``Column`` object and then converted
# to string valued column.
if not isinstance(col, Column):
col = Column(col)
col = Column([str(val) for val in col])
self.cols[i] = col
continue
# Replace single ``SkyCoord`` column by its coordinate components if no coordinate
# columns of the corresponding type exist yet.
if isinstance(col, SkyCoord):
                # If coordinates are given in RA/DEC, divide each of them into hour/deg,
# minute/arcminute, second/arcsecond columns.
if (
"ra" in col.representation_component_names.keys()
and len(set(eqtnames) - set(self.colnames)) == 6
):
ra_c, dec_c = col.ra.hms, col.dec.dms
coords = [
ra_c.h.round().astype("i1"),
ra_c.m.round().astype("i1"),
ra_c.s,
dec_c.d.round().astype("i1"),
dec_c.m.round().astype("i1"),
dec_c.s,
]
coord_units = [u.h, u.min, u.second, u.deg, u.arcmin, u.arcsec]
coord_descrip = [
"Right Ascension (hour)",
"Right Ascension (minute)",
"Right Ascension (second)",
"Declination (degree)",
"Declination (arcmin)",
"Declination (arcsec)",
]
for coord, name, coord_unit, descrip in zip(
coords, eqtnames, coord_units, coord_descrip
):
# Have Sign of Declination only in the DEd column.
if name in ["DEm", "DEs"]:
coord_col = Column(
list(np.abs(coord)),
name=name,
unit=coord_unit,
description=descrip,
)
else:
coord_col = Column(
list(coord),
name=name,
unit=coord_unit,
description=descrip,
)
# Set default number of digits after decimal point for the
# second values, and deg-min to (signed) 2-digit zero-padded integer.
if name == "RAs":
coord_col.format = "013.10f"
elif name == "DEs":
coord_col.format = "012.9f"
elif name == "RAh":
coord_col.format = "2d"
elif name == "DEd":
coord_col.format = "+03d"
elif name.startswith(("RA", "DE")):
coord_col.format = "02d"
self.cols.append(coord_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
# For all other coordinate types, simply divide into two columns
                # for latitude and longitude resp., with the unit kept as it is.
else:
frminfo = ""
for frame, latlon in coord_systems.items():
if (
frame in col.name
and len(set(latlon[:2]) - set(self.colnames)) == 2
):
if frame != col.name:
frminfo = f" ({col.name})"
lon_col = Column(
getattr(col, latlon[3]),
name=latlon[1],
description=f"{frame.capitalize()} Longitude{frminfo}",
unit=col.representation_component_units[latlon[3]],
format=".12f",
)
lat_col = Column(
getattr(col, latlon[2]),
name=latlon[0],
description=f"{frame.capitalize()} Latitude{frminfo}",
unit=col.representation_component_units[latlon[2]],
format="+.12f",
)
self.cols.append(lon_col)
self.cols.append(lat_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``SkyCoord`` columns that are not in the above three
# representations to string valued columns. Those could either be types not
# supported yet (e.g. 'helioprojective'), or already present and converted.
# If there were any extra ``SkyCoord`` columns of one kind after the first one,
# then their decomposition into their component columns has been skipped.
# This is done in order to not create duplicate component columns.
# Explicit renaming of the extra coordinate component columns by appending some
# suffix to their name, so as to distinguish them, is not yet implemented.
if i not in to_pop:
warnings.warn(
f"Coordinate system of type '{col.name}' already stored in"
" table as CDS/MRT-syle columns or of unrecognized type. So"
f" column {i} is being skipped with designation of a string"
f" valued column `{self.colnames[i]}`.",
UserWarning,
)
self.cols.append(Column(col.to_string(), name=self.colnames[i]))
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``mixin`` columns to ``Column`` objects.
# Parsing these may still lead to errors!
elif not isinstance(col, Column):
col = Column(col)
# If column values are ``object`` types, convert them to string.
if np.issubdtype(col.dtype, np.dtype(object).type):
col = Column([str(val) for val in col])
self.cols[i] = col
# Delete original ``SkyCoord`` columns, if there were any.
for i in to_pop[::-1]:
self.cols.pop(i)
# Check for any left over extra coordinate columns.
if any(x in self.colnames for x in ["RAh", "DEd", "ELON", "GLAT"]):
# At this point any extra ``SkyCoord`` columns should have been converted to string
# valued columns, together with issuance of a warning, by the coordinate parser above.
# This test is just left here as a safeguard.
for i, col in enumerate(self.cols):
if isinstance(col, SkyCoord):
self.cols[i] = Column(col.to_string(), name=self.colnames[i])
message = (
"Table already has coordinate system in CDS/MRT-syle columns. "
f"So column {i} should have been replaced already with "
f"a string valued column `{self.colnames[i]}`."
)
raise core.InconsistentTableError(message)
# Get Byte-By-Byte description and fill the template
bbb_template = Template("\n".join(BYTE_BY_BYTE_TEMPLATE))
byte_by_byte = bbb_template.substitute(
{"file": "table.dat", "bytebybyte": self.write_byte_by_byte()}
)
# Fill up the full ReadMe
rm_template = Template("\n".join(MRT_TEMPLATE))
readme_filled = rm_template.substitute({"bytebybyte": byte_by_byte})
lines.append(readme_filled)
class MrtData(cds.CdsData):
"""MRT table data reader"""
_subfmt = "MRT"
splitter_class = MrtSplitter
def write(self, lines):
self.splitter.delimiter = " "
fixedwidth.FixedWidthData.write(self, lines)
class Mrt(core.BaseReader):
"""AAS MRT (Machine-Readable Table) format table.
**Reading**
::
>>> from astropy.io import ascii
>>> table = ascii.read('data.mrt', format='mrt')
**Writing**
Use ``ascii.write(table, 'data.mrt', format='mrt')`` to write tables to
Machine Readable Table (MRT) format.
Note that the metadata of the table, apart from units, column names and
description, will not be written. These have to be filled in by hand later.
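For example, a minimal writing sketch (the table contents and the output
file name here are purely illustrative)::

    >>> from astropy.io import ascii
    >>> from astropy.table import Table
    >>> t = Table({'name': ['a', 'b'], 'flux': [1.0, 2.0]})
    >>> ascii.write(t, 'data.mrt', format='mrt')  # doctest: +SKIP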
See also: :ref:`cds_mrt_format`.
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = "mrt"
_io_registry_format_aliases = ["mrt"]
_io_registry_can_write = True
_description = "MRT format table"
data_class = MrtData
header_class = MrtHeader
def write(self, table=None):
# Construct for writing empty table is not yet done.
if len(table) == 0:
raise NotImplementedError
self.data.header = self.header
self.header.position_line = None
self.header.start_line = None
# Create a copy of the ``table``, so that the copy gets modified and
# written to the file, while the original table remains unchanged.
table = table.copy()
return super().write(table)
|
5c745506f870d7da988678210799b1693e8f6e4d918b7859c3ef330c41c3ab4f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
latex.py:
Classes to read and write LaTeX tables
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
latexdicts = {
"AA": {
"tabletype": "table",
"header_start": r"\hline \hline",
"header_end": r"\hline",
"data_end": r"\hline",
},
"doublelines": {
"tabletype": "table",
"header_start": r"\hline \hline",
"header_end": r"\hline\hline",
"data_end": r"\hline\hline",
},
"template": {
"tabletype": "tabletype",
"caption": "caption",
"tablealign": "tablealign",
"col_align": "col_align",
"preamble": "preamble",
"header_start": "header_start",
"header_end": "header_end",
"data_start": "data_start",
"data_end": "data_end",
"tablefoot": "tablefoot",
"units": {"col1": "unit of col1", "col2": "unit of col2"},
},
}
RE_COMMENT = re.compile(r"(?<!\\)%") # % character but not \%
def add_dictval_to_list(adict, key, alist):
"""
Add a value from a dictionary to a list
Parameters
----------
adict : dictionary
key : hashable
alist : list
List where value should be added
"""
if key in adict:
if isinstance(adict[key], str):
alist.append(adict[key])
else:
alist.extend(adict[key])
def find_latex_line(lines, latex):
"""
Find the first line which matches a pattern
Parameters
----------
lines : list
List of strings
latex : str
Search pattern
Returns
-------
line_num : int, None
Line number. Returns None if no match was found.
"""
re_string = re.compile(latex.replace("\\", "\\\\"))
for i, line in enumerate(lines):
if re_string.match(line):
return i
else:
return None
class LatexInputter(core.BaseInputter):
def process_lines(self, lines):
return [lin.strip() for lin in lines]
class LatexSplitter(core.BaseSplitter):
"""Split LaTeX table data. Default delimiter is `&`."""
delimiter = "&"
def __call__(self, lines):
last_line = RE_COMMENT.split(lines[-1])[0].strip()
if not last_line.endswith(r"\\"):
lines[-1] = last_line + r"\\"
return super().__call__(lines)
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. Also remove
\\ at end of line"""
line = RE_COMMENT.split(line)[0]
line = line.strip()
if line.endswith(r"\\"):
line = line.rstrip(r"\\")
else:
raise core.InconsistentTableError(
r"Lines in LaTeX table have to end with \\"
)
return line
def process_val(self, val):
"""Remove whitespace and {} at the beginning or end of value."""
val = val.strip()
if val and (val[0] == "{") and (val[-1] == "}"):
val = val[1:-1]
return val
def join(self, vals):
"""Join values together and add a few extra spaces for readability"""
delimiter = " " + self.delimiter + " "
return delimiter.join(x.strip() for x in vals) + r" \\"
class LatexHeader(core.BaseHeader):
"""Class to read the header of Latex Tables"""
header_start = r"\begin{tabular}"
splitter_class = LatexSplitter
def start_line(self, lines):
line = find_latex_line(lines, self.header_start)
if line is not None:
return line + 1
else:
return None
def _get_units(self):
units = {}
col_units = [col.info.unit for col in self.cols]
for name, unit in zip(self.colnames, col_units):
if unit:
try:
units[name] = unit.to_string(format="latex_inline")
except AttributeError:
units[name] = unit
return units
def write(self, lines):
if "col_align" not in self.latex:
self.latex["col_align"] = len(self.cols) * "c"
if "tablealign" in self.latex:
align = "[" + self.latex["tablealign"] + "]"
else:
align = ""
if self.latex["tabletype"] is not None:
lines.append(r"\begin{" + self.latex["tabletype"] + r"}" + align)
add_dictval_to_list(self.latex, "preamble", lines)
if "caption" in self.latex:
lines.append(r"\caption{" + self.latex["caption"] + "}")
lines.append(self.header_start + r"{" + self.latex["col_align"] + r"}")
add_dictval_to_list(self.latex, "header_start", lines)
lines.append(self.splitter.join(self.colnames))
units = self._get_units()
if "units" in self.latex:
units.update(self.latex["units"])
if units:
lines.append(
self.splitter.join([units.get(name, " ") for name in self.colnames])
)
add_dictval_to_list(self.latex, "header_end", lines)
class LatexData(core.BaseData):
"""Class to read the data in LaTeX tables"""
data_start = None
data_end = r"\end{tabular}"
splitter_class = LatexSplitter
def start_line(self, lines):
if self.data_start:
return find_latex_line(lines, self.data_start)
else:
start = self.header.start_line(lines)
if start is None:
raise core.InconsistentTableError(r"Could not find table start")
return start + 1
def end_line(self, lines):
if self.data_end:
return find_latex_line(lines, self.data_end)
else:
return None
def write(self, lines):
add_dictval_to_list(self.latex, "data_start", lines)
core.BaseData.write(self, lines)
add_dictval_to_list(self.latex, "data_end", lines)
lines.append(self.data_end)
add_dictval_to_list(self.latex, "tablefoot", lines)
if self.latex["tabletype"] is not None:
lines.append(r"\end{" + self.latex["tabletype"] + "}")
class Latex(core.BaseReader):
r"""LaTeX format table.
This class implements some LaTeX specific commands. Its main
purpose is to write out a table in a form that LaTeX can compile. It
is beyond the scope of this class to implement every possible LaTeX
command; instead, the focus is on generating syntactically valid
LaTeX tables.
This class can also read simple LaTeX tables (one line per table
row, no ``\multicolumn`` or similar constructs), specifically, it
can read the tables that it writes.
Reading a LaTeX table, the following keywords are accepted:
**ignore_latex_commands** :
Lines starting with these LaTeX commands will be treated as comments (i.e. ignored).
When writing a LaTeX table, some keywords can be used to customize the
format. Care has to be taken here, because python interprets ``\\``
in a string as an escape character. In order to pass this to the
output either format your strings as raw strings with the ``r``
specifier or use a double ``\\\\``.
Examples::
caption = r'My table \label{mytable}'
caption = 'My table \\\\label{mytable}'
**latexdict** : Dictionary of extra parameters for the LaTeX output
* tabletype : used for first and last line of table.
The default is ``\\begin{table}``. The following would generate a table,
which spans the whole page in a two-column document::
ascii.write(data, sys.stdout, Writer = ascii.Latex,
latexdict = {'tabletype': 'table*'})
If ``None``, the table environment will be dropped, keeping only
the ``tabular`` environment.
* tablealign : positioning of table in text.
The default is not to specify a position preference in the text.
If, e.g. the alignment is ``ht``, then the LaTeX will be ``\\begin{table}[ht]``.
* col_align : Alignment of columns
If not present all columns will be centered.
* caption : Table caption (string or list of strings)
This will appear above the table as it is the standard in
many scientific publications. If you prefer a caption below
the table, just write the full LaTeX command as
``latexdict['tablefoot'] = r'\caption{My table}'``
* preamble, header_start, header_end, data_start, data_end, tablefoot: Pure LaTeX
Each one can be a string or a list of strings. These strings
will be inserted into the table without any further
processing. See the examples below.
* units : dictionary of strings
Keys in this dictionary should be names of columns. If
present, a line in the LaTeX table directly below the column
names is added, which contains the values of the
dictionary. Example::
from astropy.io import ascii
data = {'name': ['bike', 'car'], 'mass': [75,1200], 'speed': [10, 130]}
ascii.write(data, Writer=ascii.Latex,
latexdict = {'units': {'mass': 'kg', 'speed': 'km/h'}})
If the column has no entry in the ``units`` dictionary, it defaults
to the **unit** attribute of the column. If this attribute is not
specified (i.e. it is None), the unit will be written as ``' '``.
Run the following code to see where each element of the
dictionary is inserted in the LaTeX table::
from astropy.io import ascii
data = {'cola': [1,2], 'colb': [3,4]}
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['template'])
Some table styles are predefined in the dictionary
``ascii.latex.latexdicts``. The following generates a table in the
style preferred by A&A and some other journals::
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['AA'])
As an example, this generates a table, which spans all columns
and is centered on the page::
ascii.write(data, Writer=ascii.Latex, col_align='|lr|',
latexdict={'preamble': r'\begin{center}',
'tablefoot': r'\end{center}',
'tabletype': 'table*'})
**caption** : Set table caption
Shorthand for::
latexdict['caption'] = caption
**col_align** : Set the column alignment.
If not present this will be auto-generated for centered
columns. Shorthand for::
latexdict['col_align'] = col_align
"""
_format_name = "latex"
_io_registry_format_aliases = ["latex"]
_io_registry_suffix = ".tex"
_description = "LaTeX table"
header_class = LatexHeader
data_class = LatexData
inputter_class = LatexInputter
# Strictly speaking latex only supports 1-d columns so this should inherit
# the base max_ndim = 1. But as reported in #11695 this causes a strange
# problem with Jupyter notebook, which displays a table by first calling
# _repr_latex_. For a multidimensional table this issues a stack traceback
# before moving on to _repr_html_. Here we prioritize fixing the issue with
# Jupyter displaying a Table with multidimensional columns.
max_ndim = None
def __init__(
self,
ignore_latex_commands=[
"hline",
"vspace",
"tableline",
"toprule",
"midrule",
"bottomrule",
],
latexdict={},
caption="",
col_align=None,
):
super().__init__()
self.latex = {}
# The latex dict drives the format of the table and needs to be shared
# with data and header
self.header.latex = self.latex
self.data.latex = self.latex
self.latex["tabletype"] = "table"
self.latex.update(latexdict)
if caption:
self.latex["caption"] = caption
if col_align:
self.latex["col_align"] = col_align
self.ignore_latex_commands = ignore_latex_commands
self.header.comment = "%|" + "|".join(
[r"\\" + command for command in self.ignore_latex_commands]
)
self.data.comment = self.header.comment
def write(self, table=None):
self.header.start_line = None
self.data.start_line = None
return core.BaseReader.write(self, table=table)
class AASTexHeaderSplitter(LatexSplitter):
r"""Extract column names from a `deluxetable`_.
This splitter expects the following LaTeX code **in a single line**:
\tablehead{\colhead{col1} & ... & \colhead{coln}}
"""
def __call__(self, lines):
return super(LatexSplitter, self).__call__(lines)
def process_line(self, line):
"""extract column names from tablehead"""
line = line.split("%")[0]
line = line.replace(r"\tablehead", "")
line = line.strip()
if (line[0] == "{") and (line[-1] == "}"):
line = line[1:-1]
else:
raise core.InconsistentTableError(r"\tablehead is missing {}")
return line.replace(r"\colhead", "")
def join(self, vals):
return " & ".join([r"\colhead{" + str(x) + "}" for x in vals])
class AASTexHeader(LatexHeader):
r"""In a `deluxetable
<http://fits.gsfc.nasa.gov/standard30/deluxetable.sty>`_ some header
keywords differ from standard LaTeX.
This header is modified to take that into account.
"""
header_start = r"\tablehead"
splitter_class = AASTexHeaderSplitter
def start_line(self, lines):
return find_latex_line(lines, r"\tablehead")
def write(self, lines):
if "col_align" not in self.latex:
self.latex["col_align"] = len(self.cols) * "c"
if "tablealign" in self.latex:
align = "[" + self.latex["tablealign"] + "]"
else:
align = ""
lines.append(
r"\begin{"
+ self.latex["tabletype"]
+ r"}{"
+ self.latex["col_align"]
+ r"}"
+ align
)
add_dictval_to_list(self.latex, "preamble", lines)
if "caption" in self.latex:
lines.append(r"\tablecaption{" + self.latex["caption"] + "}")
tablehead = " & ".join([r"\colhead{" + name + "}" for name in self.colnames])
units = self._get_units()
if "units" in self.latex:
units.update(self.latex["units"])
if units:
tablehead += r"\\ " + self.splitter.join(
[units.get(name, " ") for name in self.colnames]
)
lines.append(r"\tablehead{" + tablehead + "}")
class AASTexData(LatexData):
r"""In a `deluxetable`_ the data is enclosed in `\startdata` and `\enddata`"""
data_start = r"\startdata"
data_end = r"\enddata"
def start_line(self, lines):
return find_latex_line(lines, self.data_start) + 1
def write(self, lines):
lines.append(self.data_start)
lines_length_initial = len(lines)
core.BaseData.write(self, lines)
# Remove the extra space(s) and trailing \\ appended by the splitter,
# which would otherwise create an extra new line at the end.
if len(lines) > lines_length_initial:
lines[-1] = re.sub(r"\s* \\ \\ \s* $", "", lines[-1], flags=re.VERBOSE)
lines.append(self.data_end)
add_dictval_to_list(self.latex, "tablefoot", lines)
lines.append(r"\end{" + self.latex["tabletype"] + r"}")
class AASTex(Latex):
"""AASTeX format table.
This class implements some AASTeX specific commands.
AASTeX is used for the AAS (American Astronomical Society)
publications like ApJ, ApJL and AJ.
It derives from the ``Latex`` reader and accepts the same
keywords. However, the keywords ``header_start``, ``header_end``,
``data_start`` and ``data_end`` in ``latexdict`` have no effect.
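
A short usage sketch (the column names, values, and units here are
arbitrary)::

    from astropy.io import ascii

    data = {'cola': [1, 2], 'colb': [3, 4]}
    ascii.write(data, Writer=ascii.AASTex,
                latexdict={'units': {'cola': 'km', 'colb': 's'}})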
"""
_format_name = "aastex"
_io_registry_format_aliases = ["aastex"]
_io_registry_suffix = "" # AASTex inherits from Latex, so override this class attr
_description = "AASTeX deluxetable used for AAS journals"
header_class = AASTexHeader
data_class = AASTexData
def __init__(self, **kwargs):
super().__init__(**kwargs)
# check if tabletype was explicitly set by the user
if not (("latexdict" in kwargs) and ("tabletype" in kwargs["latexdict"])):
self.latex["tabletype"] = "deluxetable"
|
1093e0497b2b91e7515bcc66d6a11fb080273c260186763cef04d0b77157f6fb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
import contextlib
import re
import warnings
from collections import OrderedDict
from operator import itemgetter
import numpy as np
__all__ = ["IORegistryError"]
class IORegistryError(Exception):
"""Custom error for registry clashes."""
pass
# -----------------------------------------------------------------------------
class _UnifiedIORegistryBase(metaclass=abc.ABCMeta):
"""Base class for registries in Astropy's Unified IO.
This base class provides identification functions and miscellaneous
utilities. For an example how to build a registry subclass we suggest
:class:`~astropy.io.registry.UnifiedInputRegistry`, which enables
read-only registries. These higher-level subclasses will probably serve
better as a baseclass, for instance
:class:`~astropy.io.registry.UnifiedIORegistry` subclasses both
:class:`~astropy.io.registry.UnifiedInputRegistry` and
:class:`~astropy.io.registry.UnifiedOutputRegistry` to enable both
reading from and writing to files.
.. versionadded:: 5.0
"""
def __init__(self):
# registry of identifier functions
self._identifiers = OrderedDict()
# what this class can do: e.g. 'read' &/or 'write'
self._registries = dict()
self._registries["identify"] = dict(attr="_identifiers", column="Auto-identify")
self._registries_order = ("identify",) # match keys in `_registries`
# If multiple formats are added to one class the update of the docs is quite
# expensive. Classes for which the doc update is temporarily delayed are added
# to this set.
self._delayed_docs_classes = set()
@property
def available_registries(self):
"""Available registries.
Returns
-------
``dict_keys``
"""
return self._registries.keys()
def get_formats(self, data_class=None, filter_on=None):
"""
Get the list of registered formats as a `~astropy.table.Table`.
Parameters
----------
data_class : class or None, optional
Filter readers/writer to match data class (default = all classes).
filter_on : str or None, optional
Which registry to show, e.g. "identify".
If None, formats from all registries are shown. Default is None.
Returns
-------
format_table : :class:`~astropy.table.Table`
Table of available I/O formats.
Raises
------
ValueError
If ``filter_on`` is not None nor a registry name.
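Examples
--------
A short sketch using the module-level convenience function, which
queries Astropy's default unified registry::

    from astropy.io import registry
    from astropy.table import Table

    fmt_table = registry.get_formats(Table)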
"""
from astropy.table import Table
# set up the column names
colnames = (
"Data class",
"Format",
*[self._registries[k]["column"] for k in self._registries_order],
"Deprecated",
)
i_dataclass = colnames.index("Data class")
i_format = colnames.index("Format")
i_regstart = colnames.index(
self._registries[self._registries_order[0]]["column"]
)
i_deprecated = colnames.index("Deprecated")
# registries
regs = set()
for k in self._registries.keys() - {"identify"}:
regs |= set(getattr(self, self._registries[k]["attr"]))
format_classes = sorted(regs, key=itemgetter(0))
# the format classes from all registries except "identify"
rows = []
for fmt, cls in format_classes:
# see if can skip, else need to document in row
if data_class is not None and not self._is_best_match(
data_class, cls, format_classes
):
continue
# flags for each registry
has_ = {
k: "Yes" if (fmt, cls) in getattr(self, v["attr"]) else "No"
for k, v in self._registries.items()
}
# Check if this is a short name (e.g. 'rdb') which is deprecated in
# favor of the full 'ascii.rdb'.
ascii_format_class = ("ascii." + fmt, cls)
# deprecation flag
deprecated = "Yes" if ascii_format_class in format_classes else ""
# add to rows
rows.append(
(
cls.__name__,
fmt,
*[has_[n] for n in self._registries_order],
deprecated,
)
)
# filter_on can be in self._registries_order or None
if str(filter_on).lower() in self._registries_order:
index = self._registries_order.index(str(filter_on).lower())
rows = [row for row in rows if row[i_regstart + index] == "Yes"]
elif filter_on is not None:
raise ValueError(
'unrecognized value for "filter_on": {0}.\n'
f"Allowed are {self._registries_order} and None."
)
# Sorting the list of tuples is much faster than sorting it after the
# table is created. (#5262)
if rows:
# Indices represent "Data Class", "Deprecated" and "Format".
data = list(
zip(*sorted(rows, key=itemgetter(i_dataclass, i_deprecated, i_format)))
)
else:
data = None
# make table
# need to filter elementwise comparison failure issue
# https://github.com/numpy/numpy/issues/6784
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=FutureWarning)
format_table = Table(data, names=colnames)
if not np.any(format_table["Deprecated"].data == "Yes"):
format_table.remove_column("Deprecated")
return format_table
@contextlib.contextmanager
def delay_doc_updates(self, cls):
"""Contextmanager to disable documentation updates when registering
reader and writer. The documentation is only built once when the
contextmanager exits.
.. versionadded:: 1.3
Parameters
----------
cls : class
Class for which the documentation updates should be delayed.
Notes
-----
Registering multiple readers and writers can cause significant overhead
because the documentation of the corresponding ``read`` and ``write``
methods is rebuilt every time.
Examples
--------
See, for example, the source code of ``astropy.table.__init__``.
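
A schematic sketch using the full read/write registry; the format name
and the reader/writer callables are hypothetical placeholders::

    from astropy.io.registry import UnifiedIORegistry
    from astropy.table import Table

    def my_read_function(*args, **kwargs):  # hypothetical reader
        return Table()

    def my_write_function(table, *args, **kwargs):  # hypothetical writer
        pass

    registry = UnifiedIORegistry()
    with registry.delay_doc_updates(Table):
        registry.register_reader('myfmt', Table, my_read_function)
        registry.register_writer('myfmt', Table, my_write_function)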
"""
self._delayed_docs_classes.add(cls)
yield
self._delayed_docs_classes.discard(cls)
for method in self._registries.keys() - {"identify"}:
self._update__doc__(cls, method)
# =========================================================================
# Identifier methods
def register_identifier(self, data_format, data_class, identifier, force=False):
"""
Associate an identifier function with a specific data type.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
identifier : function
A function that checks the argument specified to `read` or `write` to
determine whether the input can be interpreted as a table of type
``data_format``. This function should take the following arguments:
- ``origin``: A string ``"read"`` or ``"write"`` identifying whether
the file is to be opened for reading or writing.
- ``path``: The path to the file.
- ``fileobj``: An open file object to read the file's contents, or
`None` if the file could not be opened.
- ``*args``: Positional arguments for the `read` or `write`
function.
- ``**kwargs``: Keyword arguments for the `read` or `write`
function.
One or both of ``path`` or ``fileobj`` may be `None`. If they are
both `None`, the identifier will need to work from ``args[0]``.
The function should return True if the input can be identified
as being of format ``data_format``, and False otherwise.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
Examples
--------
To set the identifier based on extensions, for formats that take a
filename as a first argument, you can do for example
.. code-block:: python
from astropy.io.registry import register_identifier
from astropy.table import Table
def my_identifier(*args, **kwargs):
return isinstance(args[0], str) and args[0].endswith('.tbl')
register_identifier('ipac', Table, my_identifier)
unregister_identifier('ipac', Table)
"""
if (data_format, data_class) not in self._identifiers or force:
self._identifiers[(data_format, data_class)] = identifier
else:
raise IORegistryError(
f"Identifier for format {data_format!r} and class"
f" {data_class.__name__!r} is already defined"
)
def unregister_identifier(self, data_format, data_class):
"""
Unregister an identifier function
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that can be read/written.
"""
if (data_format, data_class) in self._identifiers:
self._identifiers.pop((data_format, data_class))
else:
raise IORegistryError(
f"No identifier defined for format {data_format!r} and class"
f" {data_class.__name__!r}"
)
def identify_format(self, origin, data_class_required, path, fileobj, args, kwargs):
"""Loop through identifiers to see which formats match.
Parameters
----------
origin : str
A string ``"read`` or ``"write"`` identifying whether the file is to be
opened for reading or writing.
data_class_required : object
The specified class for the result of `read` or the class that is to be
written.
path : str or path-like or None
The path to the file or None.
fileobj : file-like or None.
An open file object to read the file's contents, or ``None`` if the
file could not be opened.
args : sequence
Positional arguments for the `read` or `write` function. Note that
these must be provided as sequence.
kwargs : dict-like
Keyword arguments for the `read` or `write` function. Note that this
parameter must be `dict`-like.
Returns
-------
valid_formats : list
List of matching formats.
"""
valid_formats = []
for data_format, data_class in self._identifiers:
if self._is_best_match(data_class_required, data_class, self._identifiers):
if self._identifiers[(data_format, data_class)](
origin, path, fileobj, *args, **kwargs
):
valid_formats.append(data_format)
return valid_formats
# =========================================================================
# Utils
def _get_format_table_str(self, data_class, filter_on):
"""``get_formats()``, without column "Data class", as a str."""
format_table = self.get_formats(data_class, filter_on)
format_table.remove_column("Data class")
format_table_str = "\n".join(format_table.pformat(max_lines=-1))
return format_table_str
def _is_best_match(self, class1, class2, format_classes):
"""
Determine if class2 is the "best" match for class1 in the list
of classes. It is assumed that (class2 in classes) is True.
class2 is the best match if:
- ``class1`` is a subclass of ``class2`` AND
- ``class2`` is the nearest ancestor of ``class1`` that is in classes
(which includes the case that ``class1 is class2``)
"""
if issubclass(class1, class2):
classes = {cls for fmt, cls in format_classes}
for parent in class1.__mro__:
if parent is class2: # class2 is closest registered ancestor
return True
if parent in classes: # class2 was superseded
return False
return False
def _get_valid_format(self, mode, cls, path, fileobj, args, kwargs):
"""
Returns the first valid format that can be used to read/write the data in
question. Mode can be either 'read' or 'write'.
"""
valid_formats = self.identify_format(mode, cls, path, fileobj, args, kwargs)
if len(valid_formats) == 0:
format_table_str = self._get_format_table_str(cls, mode.capitalize())
raise IORegistryError(
"Format could not be identified based on the"
" file name or contents, please provide a"
" 'format' argument.\n"
f"The available formats are:\n{format_table_str}"
)
elif len(valid_formats) > 1:
return self._get_highest_priority_format(mode, cls, valid_formats)
return valid_formats[0]
def _get_highest_priority_format(self, mode, cls, valid_formats):
"""
Returns the reader or writer with the highest priority. If it is a tie,
error.
"""
if mode == "read":
format_dict = self._readers
mode_loader = "reader"
elif mode == "write":
format_dict = self._writers
mode_loader = "writer"
best_formats = []
current_priority = -np.inf
for format in valid_formats:
try:
_, priority = format_dict[(format, cls)]
except KeyError:
# We could throw an exception here, but get_reader/get_writer handle
# this case better; instead, maximally deprioritise the format.
priority = -np.inf
if priority == current_priority:
best_formats.append(format)
elif priority > current_priority:
best_formats = [format]
current_priority = priority
if len(best_formats) > 1:
raise IORegistryError(
"Format is ambiguous - options are:"
f" {', '.join(sorted(valid_formats, key=itemgetter(0)))}"
)
return best_formats[0]
def _update__doc__(self, data_class, readwrite):
"""
Update the docstring to include all the available readers / writers for
the ``data_class.read``/``data_class.write`` functions (respectively).
Don't update if the data_class does not have the relevant method.
"""
# abort if method "readwrite" isn't on data_class
if not hasattr(data_class, readwrite):
return
from .interface import UnifiedReadWrite
FORMATS_TEXT = "The available built-in formats are:"
# Get the existing read or write method and its docstring
class_readwrite_func = getattr(data_class, readwrite)
if not isinstance(class_readwrite_func.__doc__, str):
# No docstring--could just be test code, or possibly code compiled
# without docstrings
return
lines = class_readwrite_func.__doc__.splitlines()
# Find the location of the existing formats table if it exists
sep_indices = [ii for ii, line in enumerate(lines) if FORMATS_TEXT in line]
if sep_indices:
# Chop off the existing formats table, including the initial blank line
chop_index = sep_indices[0]
lines = lines[:chop_index]
# Find the minimum indent, skipping the first line because it might be odd
matches = [re.search(r"(\S)", line) for line in lines[1:]]
left_indent = " " * min(match.start() for match in matches if match)
# Get the available unified I/O formats for this class
# Include only formats that have a reader, and drop the 'Data class' column
format_table = self.get_formats(data_class, readwrite.capitalize())
format_table.remove_column("Data class")
# Get the available formats as a table, then munge the output of pformat()
# a bit and put it into the docstring.
new_lines = format_table.pformat(max_lines=-1, max_width=80)
table_rst_sep = re.sub("-", "=", new_lines[1])
new_lines[1] = table_rst_sep
new_lines.insert(0, table_rst_sep)
new_lines.append(table_rst_sep)
# Check for deprecated names and include a warning at the end.
if "Deprecated" in format_table.colnames:
new_lines.extend(
[
"",
"Deprecated format names like ``aastex`` will be "
"removed in a future version. Use the full ",
"name (e.g. ``ascii.aastex``) instead.",
]
)
new_lines = [FORMATS_TEXT, ""] + new_lines
lines.extend([left_indent + line for line in new_lines])
# Depending on Python version and whether class_readwrite_func is
# an instancemethod or classmethod, one of the following will work.
if isinstance(class_readwrite_func, UnifiedReadWrite):
class_readwrite_func.__class__.__doc__ = "\n".join(lines)
else:
try:
class_readwrite_func.__doc__ = "\n".join(lines)
except AttributeError:
class_readwrite_func.__func__.__doc__ = "\n".join(lines)
|
fd87b13cd68c08f986406cba7d4bf83836e777879d3896e91c22a07886759b2a | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import itertools
import os
import re
import shutil
import sys
import warnings
import numpy as np
from astropy.io.fits.file import FILE_MODES, _File
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import (
_free_space_check,
_get_array_mmap,
_is_int,
_tmp_name,
fileobj_closed,
fileobj_mode,
ignore_sigint,
isfile,
)
from astropy.io.fits.verify import VerifyError, VerifyWarning, _ErrList, _Verify
from astropy.utils import indent
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
from astropy.utils.exceptions import AstropyUserWarning
from . import compressed
from .base import ExtensionHDU, _BaseHDU, _NonstandardHDU, _ValidHDU
from .groups import GroupsHDU
from .image import ImageHDU, PrimaryHDU
if HAS_BZ2:
import bz2
__all__ = ["HDUList", "fitsopen"]
# FITS file signature as per RFC 4047
FITS_SIGNATURE = b"SIMPLE = T"
def fitsopen(
name,
mode="readonly",
memmap=None,
save_backup=False,
cache=True,
lazy_load_hdus=None,
ignore_missing_simple=False,
*,
use_fsspec=None,
fsspec_kwargs=None,
**kwargs,
):
"""Factory function to open a FITS file and return an `HDUList` object.
Parameters
----------
name : str, file-like or `pathlib.Path`
File to be opened.
mode : str, optional
Open mode, 'readonly', 'update', 'append', 'denywrite', or
'ostream'. Default is 'readonly'.
If ``name`` is a file object that is already opened, ``mode`` must
match the mode the file was opened with, readonly (rb), update (rb+),
append (ab+), ostream (w), denywrite (rb).
memmap : bool, optional
Is memory mapping to be used? This value is obtained from the
configuration item ``astropy.io.fits.Conf.use_memmap``.
Default is `True`.
save_backup : bool, optional
If the file was opened in update or append mode, this ensures that
a backup of the original file is saved before any changes are flushed.
The backup has the same name as the original file with ".bak" appended.
If "file.bak" already exists then "file.bak.1" is used, and so on.
Default is `False`.
cache : bool, optional
If the file name is a URL, `~astropy.utils.data.download_file` is used
to open the file. This specifies whether or not to save the file
locally in Astropy's download cache. Default is `True`.
lazy_load_hdus : bool, optional
When `True`, avoid reading all the HDUs and headers in a FITS file
immediately upon opening. This is an optimization especially useful for large
files, as FITS has no way of determining the number and offsets of all
the HDUs in a file without scanning through the file and reading all
the headers. Default is `True`.
To disable lazy loading and read all HDUs immediately (the old
behavior) use ``lazy_load_hdus=False``. This can lead to fewer
surprises--for example with lazy loading enabled, ``len(hdul)``
can be slow, as it means the entire FITS file needs to be read in
order to determine the number of HDUs. ``lazy_load_hdus=False``
ensures that all HDUs have already been loaded after the file has
been opened.
.. versionadded:: 1.3
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the central value and
``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data
with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as
``uint16`` data. Default is `True` so that the pseudo-unsigned
integer convention is assumed.
ignore_missing_end : bool, optional
Do not raise an exception when opening a file that is missing an
``END`` card in the last header. Default is `False`.
ignore_missing_simple : bool, optional
Do not raise an exception when the SIMPLE keyword is missing. Note
that io.fits will raise a warning if a SIMPLE card is present but
written in a way that does not follow the FITS Standard.
Default is `False`.
.. versionadded:: 4.2
checksum : bool, str, optional
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values
(when present in the HDU header) match the header and data of all HDU's
in the file. Updates to a file that already has a checksum will
preserve and update the existing checksums unless this argument is
given a value of 'remove', in which case the CHECKSUM and DATASUM
values are not checked, and are removed when saving changes to the
file. Default is `False`.
disable_image_compression : bool, optional
If `True`, treats compressed image HDU's like normal binary table
HDU's. Default is `False`.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. Default is `False`.
character_as_bytes : bool, optional
Whether to return bytes for string columns, otherwise unicode strings
are returned, but this does not respect memory mapping and loads the
whole column in memory when accessed. Default is `False`.
ignore_blank : bool, optional
If `True`, the BLANK keyword is ignored if present.
Default is `False`.
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled image
data, restore the data to the original type and reapply the original
BSCALE/BZERO values. This could lead to loss of accuracy if scaling
back to integer values after performing floating point operations on
the data. Default is `False`.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
use_fsspec : bool, optional
Use `fsspec.open` to open the file? Defaults to `False` unless
``name`` starts with the Amazon S3 storage prefix ``s3://`` or the
Google Cloud Storage prefix ``gs://``. Can also be used for paths
with other prefixes (e.g., ``http://``) but in this case you must
explicitly pass ``use_fsspec=True``.
Use of this feature requires the optional ``fsspec`` package.
A ``ModuleNotFoundError`` will be raised if the dependency is missing.
.. versionadded:: 5.2
fsspec_kwargs : dict, optional
Keyword arguments passed on to `fsspec.open`. This can be used to
configure cloud storage credentials and caching behavior.
For example, pass ``fsspec_kwargs={"anon": True}`` to enable
anonymous access to Amazon S3 open data buckets.
See ``fsspec``'s documentation for available parameters.
.. versionadded:: 5.2
Returns
-------
hdulist : `HDUList`
`HDUList` containing all of the header data units in the file.
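Examples
--------
A minimal usage sketch; the file name here is purely illustrative, and
``hdul[1]`` assumes the file contains at least one extension HDU::

    from astropy.io import fits

    with fits.open('observation.fits') as hdul:
        hdul.info()
        data = hdul[1].data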
"""
from astropy.io.fits import conf
if memmap is None:
# distinguish between True (kwarg explicitly set)
# and None (preference for memmap in config, might be ignored)
memmap = None if conf.use_memmap else False
else:
memmap = bool(memmap)
if lazy_load_hdus is None:
lazy_load_hdus = conf.lazy_load_hdus
else:
lazy_load_hdus = bool(lazy_load_hdus)
if "uint" not in kwargs:
kwargs["uint"] = conf.enable_uint
if not name:
raise ValueError(f"Empty filename: {name!r}")
return HDUList.fromfile(
name,
mode,
memmap,
save_backup,
cache,
lazy_load_hdus,
ignore_missing_simple,
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
**kwargs,
)
class HDUList(list, _Verify):
"""
HDU list class. This is the top-level FITS object. When a FITS
file is opened, a `HDUList` object is returned.
"""
def __init__(self, hdus=[], file=None):
"""
Construct a `HDUList` object.
Parameters
----------
hdus : BaseHDU or sequence thereof, optional
The HDU object(s) to comprise the `HDUList`. Should be
instances of HDU classes like `ImageHDU` or `BinTableHDU`.
file : file-like, bytes, optional
The opened physical file associated with the `HDUList`
or a bytes object containing the contents of the FITS
file.
"""
if isinstance(file, bytes):
self._data = file
self._file = None
else:
self._file = file
self._data = None
# For internal use only--the keyword args passed to fitsopen /
# HDUList.fromfile/string when opening the file
self._open_kwargs = {}
self._in_read_next_hdu = False
# If we have read all the HDUs from the file or not
# This assumes that all HDUs have been written when we first opened the
# file; we do not currently support loading additional HDUs from a file
# while it is being streamed to. In the future that might be supported
# but for now this is only used for the purpose of lazy-loading of
# existing HDUs.
if file is None:
self._read_all = True
elif self._file is not None:
# Should never attempt to read HDUs in ostream mode
self._read_all = self._file.mode == "ostream"
else:
self._read_all = False
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDU's as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise TypeError("Invalid input for HDUList.")
for idx, hdu in enumerate(hdus):
if not isinstance(hdu, _BaseHDU):
raise TypeError(f"Element {idx} in the HDUList input is not an HDU.")
super().__init__(hdus)
if file is None:
# Only do this when initializing from an existing list of HDUs
# When initializing from a file, this will be handled by the
# append method after the first HDU is read
self.update_extend()
def __len__(self):
if not self._in_read_next_hdu:
self.readall()
return super().__len__()
def __repr__(self):
# Special case: if the FITS file is located on a remote file system
# and has not been fully read yet, we return a simplified repr to
# avoid downloading the entire file. We can tell that a file is remote
# from the fact that the ``fsspec`` package was used to open it.
is_fsspec_file = self._file and "fsspec" in str(
self._file._file.__class__.__bases__
)
if not self._read_all and is_fsspec_file:
return f"{type(self)} (partially read)"
# In order to correctly repr an HDUList we need to load all the
# HDUs as well
self.readall()
return super().__repr__()
def __iter__(self):
# While effectively this does the same as:
# for idx in range(len(self)):
# yield self[idx]
# the more complicated structure is here to prevent the use of len(),
# which would break the lazy loading
for idx in itertools.count():
try:
yield self[idx]
except IndexError:
break
def __getitem__(self, key):
"""
Get an HDU from the `HDUList`, indexed by number or name.
"""
# If the key is a slice we need to make sure the necessary HDUs
# have been loaded before passing the slice on to super.
if isinstance(key, slice):
max_idx = key.stop
# Check for and handle the case when no maximum was
# specified (e.g. [1:]).
if max_idx is None:
# We need all of the HDUs, so load them
# and reset the maximum to the actual length.
max_idx = len(self)
# Just in case the max_idx is negative...
max_idx = self._positive_index_of(max_idx)
number_loaded = super().__len__()
if max_idx >= number_loaded:
# We need more than we have, try loading up to and including
# max_idx. Note we do not try to be clever about skipping HDUs
# even though key.step might conceivably allow it.
for i in range(number_loaded, max_idx):
# Read until max_idx or to the end of the file, whichever
# comes first.
if not self._read_next_hdu():
break
try:
hdus = super().__getitem__(key)
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError(
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
else:
return HDUList(hdus)
# Originally this used recursion, but hypothetically a file with
# a very large number of HDUs could blow the stack, so use a loop
# instead
try:
return self._try_while_unread_hdus(
super().__getitem__, self._positive_index_of(key)
)
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError(
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
def __contains__(self, item):
"""
Returns `True` if ``item`` is an ``HDU`` _in_ ``self`` or a valid
extension specification (e.g., integer extension number, extension
name, or a tuple of extension name and an extension version)
of a ``HDU`` in ``self``.
"""
try:
self._try_while_unread_hdus(self.index_of, item)
except (KeyError, ValueError):
return False
return True
def __setitem__(self, key, hdu):
"""
Set an HDU to the `HDUList`, indexed by number or name.
"""
_key = self._positive_index_of(key)
if isinstance(hdu, (slice, list)):
if _is_int(_key):
raise ValueError("An element in the HDUList must be an HDU.")
for item in hdu:
if not isinstance(item, _BaseHDU):
raise ValueError(f"{item} is not an HDU.")
else:
if not isinstance(hdu, _BaseHDU):
raise ValueError(f"{hdu} is not an HDU.")
try:
self._try_while_unread_hdus(super().__setitem__, _key, hdu)
except IndexError:
raise IndexError(f"Extension {key} is out of bound or not found.")
self._resize = True
self._truncate = False
def __delitem__(self, key):
"""
Delete an HDU from the `HDUList`, indexed by number or name.
"""
if isinstance(key, slice):
end_index = len(self)
else:
key = self._positive_index_of(key)
end_index = len(self) - 1
self._try_while_unread_hdus(super().__delitem__, key)
if key == end_index or key == -1 and not self._resize:
self._truncate = True
else:
self._truncate = False
self._resize = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
output_verify = self._open_kwargs.get("output_verify", "exception")
self.close(output_verify=output_verify)
@classmethod
def fromfile(
cls,
fileobj,
mode=None,
memmap=None,
save_backup=False,
cache=True,
lazy_load_hdus=True,
ignore_missing_simple=False,
**kwargs,
):
"""
Creates an `HDUList` instance from a file-like object.
The actual implementation of ``fitsopen()``, and generally shouldn't
be used directly. Use :func:`open` instead (and see its
documentation for details of the parameters accepted by this method).
"""
return cls._readfrom(
fileobj=fileobj,
mode=mode,
memmap=memmap,
save_backup=save_backup,
cache=cache,
ignore_missing_simple=ignore_missing_simple,
lazy_load_hdus=lazy_load_hdus,
**kwargs,
)
@classmethod
def fromstring(cls, data, **kwargs):
"""
Creates an `HDUList` instance from a string or other in-memory data
buffer containing an entire FITS file. Similar to
:meth:`HDUList.fromfile`, but does not accept the mode or memmap
arguments, as they are only relevant to reading from a file on disk.
This is useful for interfacing with other libraries such as CFITSIO,
and may also be useful for streaming applications.
Parameters
----------
data : str, buffer-like, etc.
A string or other memory buffer containing an entire FITS file.
Buffer-like objects include :class:`~bytes`, :class:`~bytearray`,
:class:`~memoryview`, and :class:`~numpy.ndarray`.
It should be noted that if that memory is read-only (such as a
Python string) the returned :class:`HDUList`'s data portions will
also be read-only.
**kwargs : dict
Optional keyword arguments. See
:func:`astropy.io.fits.open` for details.
Returns
-------
hdul : HDUList
An :class:`HDUList` object representing the in-memory FITS file.
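Examples
--------
A self-contained sketch that round-trips a minimal FITS file through an
in-memory buffer::

    import io
    from astropy.io import fits

    buf = io.BytesIO()
    fits.HDUList([fits.PrimaryHDU()]).writeto(buf)
    hdul = fits.HDUList.fromstring(buf.getvalue())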
"""
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype="ubyte", buffer=data)
except TypeError:
raise TypeError(
"The provided object {} does not contain an underlying "
"memory buffer. fromstring() requires an object that "
"supports the buffer interface such as bytes, buffer, "
"memoryview, ndarray, etc. This restriction is to ensure "
"that efficient access to the array/table data is possible."
"".format(data)
)
return cls._readfrom(data=data, **kwargs)
def fileinfo(self, index):
"""
Returns a dictionary detailing information about the locations
of the indexed HDU within any associated file. The values are
only valid after a read or write of the associated file with
no intervening changes to the `HDUList`.
Parameters
----------
index : int
Index of HDU for which info is to be returned.
Returns
-------
fileinfo : dict or None
The dictionary details information about the locations of
the indexed HDU within an associated file. Returns `None`
when the HDU is not associated with a file.
Dictionary contents:
========== ========================================================
Key Value
========== ========================================================
file File object associated with the HDU
filename Name of associated file object
filemode Mode in which the file was opened (readonly,
update, append, denywrite, ostream)
resized Flag that when `True` indicates that the data has been
resized since the last read/write so the returned values
may not be valid.
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ========================================================
"""
if self._file is not None:
output = self[index].fileinfo()
if not output:
# OK, the HDU associated with this index is not yet
# tied to the file associated with the HDUList. The only way
# to get the file object is to check each of the HDU's in the
# list until we find the one associated with the file.
f = None
for hdu in self:
info = hdu.fileinfo()
if info:
f = info["file"]
fm = info["filemode"]
break
output = {
"file": f,
"filemode": fm,
"hdrLoc": None,
"datLoc": None,
"datSpan": None,
}
output["filename"] = self._file.name
output["resized"] = self._wasresized()
else:
output = None
return output
def __copy__(self):
"""
Return a shallow copy of an HDUList.
Returns
-------
copy : `HDUList`
A shallow copy of this `HDUList` object.
"""
return self[:]
# Syntactic sugar for `__copy__()` magic method
copy = __copy__
def __deepcopy__(self, memo=None):
return HDUList([hdu.copy() for hdu in self])
def pop(self, index=-1):
"""Remove an item from the list and return it.
Parameters
----------
index : int, str, tuple of (string, int), optional
An integer value of ``index`` indicates the position from which
``pop()`` removes and returns an HDU. A string value or a tuple
of ``(string, int)`` functions as a key for identifying the
HDU to be removed and returned. If ``key`` is a tuple, it is
of the form ``(key, ver)`` where ``ver`` is an ``EXTVER``
value that must match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous the numeric index
must be used to index the duplicate HDU.
Returns
-------
hdu : BaseHDU
The HDU object at position indicated by ``index`` or having name
and version specified by ``index``.
"""
# Make sure that HDUs are loaded before attempting to pop
self.readall()
list_index = self.index_of(index)
return super().pop(list_index)
def insert(self, index, hdu):
"""
Insert an HDU into the `HDUList` at the given ``index``.
Parameters
----------
index : int
Index before which to insert the new HDU.
hdu : BaseHDU
The HDU object to insert
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError(f"{hdu} is not an HDU.")
num_hdus = len(self)
if index == 0 or num_hdus == 0:
if num_hdus != 0:
# We are inserting a new Primary HDU so we need to
# make the current Primary HDU into an extension HDU.
if isinstance(self[0], GroupsHDU):
raise ValueError(
"The current Primary HDU is a GroupsHDU. "
"It can't be made into an extension HDU, "
"so another HDU cannot be inserted before it."
)
hdu1 = ImageHDU(self[0].data, self[0].header)
# Insert it into position 1, then delete HDU at position 0.
super().insert(1, hdu1)
super().__delitem__(0)
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().insert(0, phdu)
index = 1
else:
if isinstance(hdu, GroupsHDU):
raise ValueError("A GroupsHDU must be inserted as a Primary HDU.")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
hdu = ImageHDU(hdu.data, hdu.header)
super().insert(index, hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def append(self, hdu):
"""
Append a new HDU to the `HDUList`.
Parameters
----------
hdu : BaseHDU
HDU to add to the `HDUList`.
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError("HDUList can only append an HDU.")
if len(self) > 0:
if isinstance(hdu, GroupsHDU):
raise ValueError("Can't append a GroupsHDU to a non-empty HDUList")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
# TODO: This isn't necessarily sufficient to copy the HDU;
# _header_offset and friends need to be copied too.
hdu = ImageHDU(hdu.data, hdu.header)
else:
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary
# HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().append(phdu)
super().append(hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def index_of(self, key):
"""
Get the index of an HDU from the `HDUList`.
Parameters
----------
key : int, str, tuple of (string, int) or BaseHDU
The key identifying the HDU. If ``key`` is a tuple, it is of the
form ``(name, ver)`` where ``ver`` is an ``EXTVER`` value that must
match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous (it shouldn't be
but it's not impossible) the numeric index must be used to index
the duplicate HDU.
When ``key`` is an HDU object, this function returns the
index of that HDU object in the ``HDUList``.
Returns
-------
index : int
The index of the HDU in the `HDUList`.
Raises
------
ValueError
If ``key`` is an HDU object and it is not found in the ``HDUList``.
KeyError
If an HDU specified by the ``key`` that is an extension number,
extension name, or a tuple of extension name and version is not
found in the ``HDUList``.
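Examples
--------
A short sketch; the extension name and version here are arbitrary::

    from astropy.io import fits

    hdul = fits.HDUList([fits.PrimaryHDU(),
                         fits.ImageHDU(name='SCI', ver=2)])
    idx = hdul.index_of(('SCI', 2))  # returns 1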
"""
if _is_int(key):
return key
elif isinstance(key, tuple):
_key, _ver = key
elif isinstance(key, _BaseHDU):
return self.index(key)
else:
_key = key
_ver = None
if not isinstance(_key, str):
raise KeyError(
"{} indices must be integers, extension names as strings, "
"or (extname, version) tuples; got {}"
"".format(self.__class__.__name__, _key)
)
_key = (_key.strip()).upper()
found = None
for idx, hdu in enumerate(self):
name = hdu.name
if isinstance(name, str):
name = name.strip().upper()
# 'PRIMARY' should always work as a reference to the first HDU
if (name == _key or (_key == "PRIMARY" and idx == 0)) and (
_ver is None or _ver == hdu.ver
):
found = idx
break
if found is None:
raise KeyError(f"Extension {key!r} not found.")
else:
return found
def _positive_index_of(self, key):
"""
Same as index_of, but ensures always returning a positive index
or zero.
(Really this should be called non_negative_index_of but it felt
too long.)
This means that if the key is a negative integer, we have to
convert it to the corresponding positive index. This means
knowing the length of the HDUList, which in turn means loading
all HDUs. Therefore using negative indices on HDULists is inherently
inefficient.
"""
index = self.index_of(key)
if index >= 0:
return index
if abs(index) > len(self):
raise IndexError(f"Extension {index} is out of bound or not found.")
return len(self) + index
def readall(self):
"""
Read data of all HDUs into memory.
"""
while self._read_next_hdu():
pass
@ignore_sigint
def flush(self, output_verify="fix", verbose=False):
"""
Force a write of the `HDUList` back to the file (for append and
update modes only).
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
verbose : bool
When `True`, print verbose messages
"""
if self._file.mode not in ("append", "update", "ostream"):
warnings.warn(
f"Flush for '{self._file.mode}' mode is not supported.",
AstropyUserWarning,
)
return
save_backup = self._open_kwargs.get("save_backup", False)
if save_backup and self._file.mode in ("append", "update"):
filename = self._file.name
if os.path.exists(filename):
# If the file doesn't actually exist anymore for some reason,
# then there's no point in trying to make a backup
backup = filename + ".bak"
idx = 1
while os.path.exists(backup):
backup = filename + ".bak." + str(idx)
idx += 1
warnings.warn(
f"Saving a backup of {filename} to {backup}.",
AstropyUserWarning,
)
try:
shutil.copy(filename, backup)
except OSError as exc:
raise OSError(
"Failed to save backup to destination {}: {}".format(
filename, exc
)
)
self.verify(option=output_verify)
if self._file.mode in ("append", "ostream"):
for hdu in self:
if verbose:
try:
extver = str(hdu._header["extver"])
except KeyError:
extver = ""
# only append HDU's which are "new"
if hdu._new:
hdu._prewriteto(checksum=hdu._output_checksum)
with _free_space_check(self):
hdu._writeto(self._file)
if verbose:
print("append HDU", hdu.name, extver)
hdu._new = False
hdu._postwriteto()
elif self._file.mode == "update":
self._flush_update()
def update_extend(self):
"""
Make sure that if the primary header needs the keyword ``EXTEND`` that
it has it and it is correct.
"""
if not len(self):
return
if not isinstance(self[0], PrimaryHDU):
# A PrimaryHDU will be automatically inserted at some point, but it
# might not have been added yet
return
hdr = self[0].header
def get_first_ext():
try:
return self[1]
except IndexError:
return None
if "EXTEND" in hdr:
if not hdr["EXTEND"] and get_first_ext() is not None:
hdr["EXTEND"] = True
elif get_first_ext() is not None:
if hdr["NAXIS"] == 0:
hdr.set("EXTEND", True, after="NAXIS")
else:
n = hdr["NAXIS"]
hdr.set("EXTEND", True, after="NAXIS" + str(n))
def writeto(
self, fileobj, output_verify="exception", overwrite=False, checksum=False
):
"""
Write the `HDUList` to a new file.
Parameters
----------
fileobj : str, file-like or `pathlib.Path`
File to write to. If a file object, must be opened in a
writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the headers of all HDUs written to the file.
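Examples
--------
A minimal sketch; ``new.fits`` is a hypothetical output path:
>>> from astropy.io import fits  # doctest: +SKIP
>>> import numpy as np  # doctest: +SKIP
>>> hdul = fits.HDUList([fits.PrimaryHDU(np.zeros((2, 2)))])  # doctest: +SKIP
>>> hdul.writeto("new.fits", checksum=True, overwrite=True)  # doctest: +SKIP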
"""
if len(self) == 0:
warnings.warn("There is nothing to write.", AstropyUserWarning)
return
self.verify(option=output_verify)
# make sure the EXTEND keyword is there if there are extensions
self.update_extend()
# make note of whether the input file object is already open, in which
# case we should not close it after writing (that should be the job
# of the caller)
closed = isinstance(fileobj, str) or fileobj_closed(fileobj)
mode = FILE_MODES[fileobj_mode(fileobj)] if isfile(fileobj) else "ostream"
# This can accept an open file object that's open to write only, or in
# append/update modes but only if the file doesn't exist.
fileobj = _File(fileobj, mode=mode, overwrite=overwrite)
hdulist = self.fromfile(fileobj)
try:
dirname = os.path.dirname(hdulist._file.name)
except (AttributeError, TypeError):
dirname = None
try:
with _free_space_check(self, dirname=dirname):
for hdu in self:
hdu._prewriteto(checksum=checksum)
hdu._writeto(hdulist._file)
hdu._postwriteto()
finally:
hdulist.close(output_verify=output_verify, closed=closed)
def close(self, output_verify="exception", verbose=False, closed=True):
"""
Close the associated FITS file and memmap object, if any.
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
verbose : bool
When `True`, print out verbose messages.
closed : bool
When `True`, close the underlying file object.
"""
try:
if (
self._file
and self._file.mode in ("append", "update")
and not self._file.closed
):
self.flush(output_verify=output_verify, verbose=verbose)
finally:
if self._file and closed and hasattr(self._file, "close"):
self._file.close()
# Give individual HDUs an opportunity to do on-close cleanup
for hdu in self:
hdu._close(closed=closed)
def info(self, output=None):
"""
Summarize the info of the HDUs in this `HDUList`.
Note that this function prints its results to the console---it
does not return a value.
Parameters
----------
output : file-like or bool, optional
A file-like object to write the output to. If `False`, does not
output to a file and instead returns a list of tuples representing
the HDU info. Writes to ``sys.stdout`` by default.
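Examples
--------
A minimal sketch (``example.fits`` is a hypothetical file); passing
``output=False`` returns the summary rows instead of printing them:
>>> from astropy.io import fits  # doctest: +SKIP
>>> with fits.open("example.fits") as hdul:  # doctest: +SKIP
...     hdul.info()  # doctest: +SKIP
...     rows = hdul.info(output=False)  # doctest: +SKIP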
"""
if output is None:
output = sys.stdout
if self._file is None:
name = "(No file associated with this HDUList)"
else:
name = self._file.name
results = [
f"Filename: {name}",
"No. Name Ver Type Cards Dimensions Format",
]
format = "{:3d} {:10} {:3} {:11} {:5d} {} {} {}"
default = ("", "", "", 0, (), "", "")
for idx, hdu in enumerate(self):
summary = hdu._summary()
if len(summary) < len(default):
summary += default[len(summary) :]
summary = (idx,) + summary
if output:
results.append(format.format(*summary))
else:
results.append(summary)
if output:
output.write("\n".join(results))
output.write("\n")
output.flush()
else:
return results[2:]
def filename(self):
"""
Return the file name associated with the HDUList object if one exists.
Otherwise returns None.
Returns
-------
filename : str
A string containing the file name associated with the HDUList
object if an association exists. Otherwise returns None.
"""
if self._file is not None:
if hasattr(self._file, "name"):
return self._file.name
return None
@classmethod
def _readfrom(
cls,
fileobj=None,
data=None,
mode=None,
memmap=None,
cache=True,
lazy_load_hdus=True,
ignore_missing_simple=False,
*,
use_fsspec=None,
fsspec_kwargs=None,
**kwargs,
):
"""
Provides the implementations from HDUList.fromfile and
HDUList.fromstring, both of which wrap this method, as their
implementations are largely the same.
"""
if fileobj is not None:
if not isinstance(fileobj, _File):
# instantiate a FITS file object (ffo)
fileobj = _File(
fileobj,
mode=mode,
memmap=memmap,
cache=cache,
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
)
# The Astropy mode is determined by the _File initializer if the
# supplied mode was None
mode = fileobj.mode
hdulist = cls(file=fileobj)
else:
if mode is None:
# The default mode
mode = "readonly"
hdulist = cls(file=data)
# This method is currently only called from HDUList.fromstring and
# HDUList.fromfile. If fileobj is None then this must be the
# fromstring case; the data type of ``data`` will be checked in the
# _BaseHDU.fromstring call.
if (
not ignore_missing_simple
and hdulist._file
and hdulist._file.mode != "ostream"
and hdulist._file.size > 0
):
pos = hdulist._file.tell()
# FITS signature is supposed to be in the first 30 bytes, but to
# allow reading various invalid files we will check in the first
# card (80 bytes).
simple = hdulist._file.read(80)
match_sig = simple[:29] == FITS_SIGNATURE[:-1] and simple[29:30] in (
b"T",
b"F",
)
if not match_sig:
# Check the SIMPLE card is there but not written correctly
match_sig_relaxed = re.match(rb"SIMPLE\s*=\s*[T|F]", simple)
if match_sig_relaxed:
warnings.warn(
"Found a SIMPLE card but its format doesn't"
" respect the FITS Standard",
VerifyWarning,
)
else:
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError(
"No SIMPLE card found, this file does not appear to "
"be a valid FITS file. If this is really a FITS file, "
"try with ignore_missing_simple=True"
)
hdulist._file.seek(pos)
# Store additional keyword args that were passed to fits.open
hdulist._open_kwargs = kwargs
if fileobj is not None and fileobj.writeonly:
# Output stream--not interested in reading/parsing
# the HDUs--just writing to the output file
return hdulist
# Make sure at least the PRIMARY HDU can be read
read_one = hdulist._read_next_hdu()
# If we're trying to read only and no header units were found,
# raise an exception
if not read_one and mode in ("readonly", "denywrite"):
# Close the file if necessary (issue #6168)
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError("Empty or corrupt FITS file")
if not lazy_load_hdus or kwargs.get("checksum") is True:
# Go ahead and load all HDUs
while hdulist._read_next_hdu():
pass
# initialize/reset attributes to be used in "update/append" mode
hdulist._resize = False
hdulist._truncate = False
return hdulist
def _try_while_unread_hdus(self, func, *args, **kwargs):
"""
Attempt an operation that accesses an HDU by index/name
that can fail if not all HDUs have been read yet. Keep
reading HDUs until the operation succeeds or there are no
more HDUs to read.
"""
while True:
try:
return func(*args, **kwargs)
except Exception:
if self._read_next_hdu():
continue
else:
raise
def _read_next_hdu(self):
"""
Lazily load a single HDU from the fileobj or data string the `HDUList`
was opened from, unless no further HDUs are found.
Returns True if a new HDU was loaded, or False otherwise.
"""
if self._read_all:
return False
saved_compression_enabled = compressed.COMPRESSION_ENABLED
fileobj, data, kwargs = self._file, self._data, self._open_kwargs
if fileobj is not None and fileobj.closed:
return False
try:
self._in_read_next_hdu = True
if (
"disable_image_compression" in kwargs
and kwargs["disable_image_compression"]
):
compressed.COMPRESSION_ENABLED = False
# read all HDUs
try:
if fileobj is not None:
try:
# Make sure we're back to the end of the last read
# HDU
if len(self) > 0:
last = self[len(self) - 1]
if last._data_offset is not None:
offset = last._data_offset + last._data_size
fileobj.seek(offset, os.SEEK_SET)
hdu = _BaseHDU.readfrom(fileobj, **kwargs)
except EOFError:
self._read_all = True
return False
except OSError:
# Close the file: see
# https://github.com/astropy/astropy/issues/6168
#
if self._file.close_on_error:
self._file.close()
if fileobj.writeonly:
self._read_all = True
return False
else:
raise
else:
if not data:
self._read_all = True
return False
hdu = _BaseHDU.fromstring(data, **kwargs)
self._data = data[hdu._data_offset + hdu._data_size :]
super().append(hdu)
if len(self) == 1:
# Check for an extension HDU and update the EXTEND
# keyword of the primary HDU accordingly
self.update_extend()
hdu._new = False
if "checksum" in kwargs:
hdu._output_checksum = kwargs["checksum"]
# check in the case there is extra space after the last HDU or
# corrupted HDU
except (VerifyError, ValueError) as exc:
warnings.warn(
"Error validating header for HDU #{} (note: Astropy "
"uses zero-based indexing).\n{}\n"
"There may be extra bytes after the last HDU or the "
"file is corrupted.".format(len(self), indent(str(exc))),
VerifyWarning,
)
del exc
self._read_all = True
return False
finally:
compressed.COMPRESSION_ENABLED = saved_compression_enabled
self._in_read_next_hdu = False
return True
def _verify(self, option="warn"):
errs = _ErrList([], unit="HDU")
# the first (0th) element must be a primary HDU
if (
len(self) > 0
and (not isinstance(self[0], PrimaryHDU))
and (not isinstance(self[0], _NonstandardHDU))
):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = "Fixed by inserting one as 0th HDU."
def fix(self=self):
self.insert(0, PrimaryHDU())
err = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)
errs.append(err)
if len(self) > 1 and (
"EXTEND" not in self[0].header or self[0].header["EXTEND"] is not True
):
err_text = (
"Primary HDU does not contain an EXTEND keyword "
"equal to T even though there are extension HDUs."
)
fix_text = "Fixed by inserting or updating the EXTEND keyword."
def fix(header=self[0].header):
naxis = header["NAXIS"]
if naxis == 0:
after = "NAXIS"
else:
after = "NAXIS" + str(naxis)
header.set("EXTEND", value=True, after=after)
errs.append(
self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)
)
# each element calls their own verify
for idx, hdu in enumerate(self):
if idx > 0 and (not isinstance(hdu, ExtensionHDU)):
err_text = f"HDUList's element {str(idx)} is not an extension HDU."
err = self.run_option(option, err_text=err_text, fixable=False)
errs.append(err)
else:
result = hdu._verify(option)
if result:
errs.append(result)
return errs
def _flush_update(self):
"""Implements flushing changes to a file in update mode."""
for hdu in self:
# Need to call _prewriteto() for each HDU first to determine if
# resizing will be necessary
hdu._prewriteto(checksum=hdu._output_checksum, inplace=True)
try:
self._wasresized()
# if the HDUList is resized, need to write out the entire contents of
# the hdulist to the file.
if self._resize or self._file.compression:
self._flush_resize()
else:
# if not resized, update in place
for hdu in self:
hdu._writeto(self._file, inplace=True)
# reset the modification attributes after updating
for hdu in self:
hdu._header._modified = False
finally:
for hdu in self:
hdu._postwriteto()
def _flush_resize(self):
"""
Implements flushing changes in update mode when parts of one or more HDU
need to be resized.
"""
old_name = self._file.name
old_memmap = self._file.memmap
name = _tmp_name(old_name)
if not self._file.file_like:
old_mode = os.stat(old_name).st_mode
# The underlying file is an actual file object. The HDUList is
# resized, so we need to write it to a tmp file, delete the
# original file, and rename the tmp file to the original file.
if self._file.compression == "gzip":
new_file = gzip.GzipFile(name, mode="ab+")
elif self._file.compression == "bzip2":
if not HAS_BZ2:
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
new_file = bz2.BZ2File(name, mode="w")
else:
new_file = name
with self.fromfile(new_file, mode="append") as hdulist:
for hdu in self:
hdu._writeto(hdulist._file, inplace=True, copy=True)
if sys.platform.startswith("win"):
# Collect a list of open mmaps to the data; this will be
# used later. See below.
mmaps = [
(idx, _get_array_mmap(hdu.data), hdu.data)
for idx, hdu in enumerate(self)
if hdu._has_data
]
hdulist._file.close()
self._file.close()
if sys.platform.startswith("win"):
# Close all open mmaps to the data. This is only necessary on
# Windows, which will not allow a file to be renamed or deleted
# until all handles to that file have been closed.
for idx, mmap, arr in mmaps:
if mmap is not None:
mmap.close()
os.remove(self._file.name)
# reopen the renamed new file with "update" mode
os.rename(name, old_name)
os.chmod(old_name, old_mode)
if isinstance(new_file, gzip.GzipFile):
old_file = gzip.GzipFile(old_name, mode="rb+")
else:
old_file = old_name
ffo = _File(old_file, mode="update", memmap=old_memmap)
self._file = ffo
for hdu in self:
# Need to update the _file attribute and close any open mmaps
# on each HDU
if hdu._has_data and _get_array_mmap(hdu.data) is not None:
del hdu.data
hdu._file = ffo
if sys.platform.startswith("win"):
# On Windows, all the original data mmaps were closed above.
# However, it's possible that the user still has references to
# the old data which would no longer work (possibly even cause
# a segfault if they try to access it). This replaces the
# buffers used by the original arrays with the buffers of mmap
# arrays created from the new file. This seems to work, but
# it's a flaming hack and carries no guarantees that it won't
# lead to odd behavior in practice. Better to just not keep
# references to data from files that had to be resized upon
# flushing (on Windows--again, this is no problem on Linux).
for idx, mmap, arr in mmaps:
if mmap is not None:
# https://github.com/numpy/numpy/issues/8628
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
arr.data = self[idx].data.data
del mmaps # Just to be sure
else:
# The underlying file is not a file object, it is a file-like
# object. We can't write out to a file; we must update the
# file-like object in place. To do this, we write out to a
# temporary file, then delete the contents of our file-like
# object, then write the contents of the temporary file to the
# now-empty file-like object.
self.writeto(name)
hdulist = self.fromfile(name)
ffo = self._file
ffo.truncate(0)
ffo.seek(0)
for hdu in hdulist:
hdu._writeto(ffo, inplace=True, copy=True)
# Close the temporary file and delete it.
hdulist.close()
os.remove(hdulist._file.name)
# reset the resize attributes after updating
self._resize = False
self._truncate = False
for hdu in self:
hdu._header._modified = False
hdu._new = False
hdu._file = ffo
def _wasresized(self, verbose=False):
"""
Determine if any changes to the HDUList will require a file resize
when flushing the file.
Side effect of setting the objects _resize attribute.
"""
if not self._resize:
# determine if any of the HDU is resized
for hdu in self:
# Header:
nbytes = len(str(hdu._header))
if nbytes != (hdu._data_offset - hdu._header_offset):
self._resize = True
self._truncate = False
if verbose:
print("One or more header is resized.")
break
# Data:
if not hdu._has_data:
continue
nbytes = hdu.size
nbytes = nbytes + _pad_length(nbytes)
if nbytes != hdu._data_size:
self._resize = True
self._truncate = False
if verbose:
print("One or more data area is resized.")
break
if self._truncate:
try:
self._file.truncate(hdu._data_offset + hdu._data_size)
except OSError:
self._resize = True
self._truncate = False
return self._resize
|
eb5201073eb6e4270d025c368801c0ffc84004260e68f2368c58f228a3c56be5 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import io
import os
import pathlib
import warnings
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.io import fits
from astropy.io.fits import printdiff
from astropy.io.fits.connect import REMOVE_KEYWORDS
from astropy.io.fits.tests.test_table import _assert_attr_col
from astropy.table import Table
from astropy.utils.exceptions import AstropyUserWarning
from .conftest import FitsTestCase
class TestConvenience(FitsTestCase):
def test_resource_warning(self):
warnings.simplefilter("always", ResourceWarning)
_ = fits.getdata(self.data("test0.fits"))
_ = fits.getheader(self.data("test0.fits"))
def test_fileobj_not_closed(self):
"""
Tests that file-like objects are not closed after being passed
to convenience functions.
Regression test for https://github.com/astropy/astropy/issues/5063
"""
f = open(self.data("test0.fits"), "rb")
_ = fits.getdata(f)
assert not f.closed
f.seek(0)
_ = fits.getheader(f)
assert not f.closed
f.close() # Close it now
def test_table_to_hdu(self):
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = "m/s"
table["b"].unit = "not-a-unit"
with pytest.warns(
u.UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.table_to_hdu(table)
assert len(w) == 1
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2")
assert isinstance(hdu, fits.BinTableHDU)
filename = self.temp("test_table_to_hdu.fits")
hdu.writeto(filename, overwrite=True)
def test_masked_table_to_hdu(self):
i = np.ma.MaskedArray([1, 2, 3], mask=[True, False, False])
s = np.ma.MaskedArray(["a", "b", "c"], mask=[False, True, True])
c = np.ma.MaskedArray([2.3 + 1j, 4.5 + 0j, 6.7 - 1j], mask=[True, False, True])
f = np.ma.MaskedArray([2.3, 4.5, 6.7], mask=[True, False, True])
table = Table([i, s, c, f], names=["i", "s", "c", "f"])
# Check that FITS standard is used in replacing masked values.
hdu = fits.table_to_hdu(table)
assert isinstance(hdu, fits.BinTableHDU)
assert hdu.header["TNULL1"] == i.fill_value
assert_array_equal(hdu.data["i"], i.filled())
assert_array_equal(hdu.data["s"], s.filled(""))
assert_array_equal(hdu.data["c"], c.filled(np.nan))
assert_array_equal(hdu.data["c"].real, c.real.filled(np.nan))
assert_array_equal(hdu.data["c"].imag, c.imag.filled(np.nan))
assert_array_equal(hdu.data["c"], c.filled(complex(np.nan, np.nan)))
assert_array_equal(hdu.data["f"], f.filled(np.nan))
filename = self.temp("test_table_to_hdu.fits")
hdu.writeto(filename, overwrite=True)
def test_table_non_stringifyable_unit_to_hdu(self):
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = u.core.IrreducibleUnit("test")
with pytest.warns(
AstropyUserWarning, match="The unit 'test' could not be saved"
) as w:
fits.table_to_hdu(table)
assert len(w) == 1
def test_table_to_hdu_convert_comment_convention(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table.meta["comments"] = ["This", "is", "a", "comment"]
hdu = fits.table_to_hdu(table)
assert hdu.header.get("comment") == ["This", "is", "a", "comment"]
with pytest.raises(ValueError):
hdu.header.index("comments")
def test_table_to_hdu_filter_reserved(self):
"""
Regression test for https://github.com/astropy/astropy/issues/9387
"""
diag = "be ignored since it conflicts with a FITS reserved keyword"
ins_cards = {
"EXPTIME": 32.1,
"XTENSION": "NEWTABLE",
"NAXIS": 1,
"NAXIS1": 3,
"NAXIS2": 9,
"PCOUNT": 42,
"OBSERVER": "Adams",
}
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i4", "U1", "f8"],
)
table.meta.update(ins_cards)
with pytest.warns(
AstropyUserWarning, match=rf"Meta-data keyword \w+ will {diag}"
) as w:
hdu = fits.table_to_hdu(table)
# This relies on the warnings being raised in the order of the
# meta dict (note that the first and last card are legitimate keys)
assert len(w) == len(ins_cards) - 2
for i, key in enumerate(list(ins_cards)[1:-1]):
assert f"Meta-data keyword {key}" in str(w[i].message)
assert hdu.header.get("XTENSION") == "BINTABLE"
assert hdu.header.get("NAXIS") == 2
assert hdu.header.get("NAXIS1") == 13
assert hdu.header.get("NAXIS2") == 3
assert hdu.header.get("PCOUNT") == 0
np.testing.assert_almost_equal(hdu.header.get("EXPTIME"), 3.21e1)
@pytest.mark.parametrize("card", REMOVE_KEYWORDS)
def test_table_to_hdu_warn_reserved(self, card):
"""
Test warning for each keyword in ..connect.REMOVE_KEYWORDS, 1 by 1
"""
diag = "be ignored since it conflicts with a FITS reserved keyword"
res_cards = {
"XTENSION": "BINTABLE",
"BITPIX": 8,
"NAXIS": 2,
"NAXIS1": 12,
"NAXIS2": 3,
"PCOUNT": 0,
"GCOUNT": 1,
"TFIELDS": 2,
"THEAP": None,
}
ins_cards = {
"XTENSION": "TABLE",
"BITPIX": 16,
"NAXIS": 1,
"NAXIS1": 2,
"NAXIS2": 6,
"PCOUNT": 2,
"GCOUNT": 2,
"TFIELDS": 4,
"THEAP": 36,
}
table = Table(
[[1.0, 2.0, 3.0], [2.3, 4.5, 6.7]],
names=["wavelength", "flux"],
dtype=["f8", "f4"],
)
table.meta["ORIGIN"] = "Min.Silly Walks"
table.meta[card] = ins_cards[card]
assert table.meta.get(card) != res_cards[card]
with pytest.warns(
AstropyUserWarning, match=f"Meta-data keyword {card} will {diag}"
):
hdu = fits.table_to_hdu(table)
assert hdu.header.get(card) == res_cards[card]
assert hdu.header.get("ORIGIN") == "Min.Silly Walks"
def test_table_to_hdu_filter_incompatible(self):
"""
Test removal of unsupported data types from header
"""
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i4", "U1", "f8"],
)
table.meta.update(
{
"OBSDATE": "2001-05-26",
"RAMP": np.arange(5),
"TARGETS": {"PRIMARY": 1, "SECONDAR": 3},
}
)
with pytest.warns(
AstropyUserWarning,
match=r"Attribute \S+ of type "
r".+ cannot be added to FITS Header - skipping",
):
hdu = fits.table_to_hdu(table)
assert hdu.header.get("OBSDATE") == "2001-05-26"
assert "RAMP" not in hdu.header
assert "TARGETS" not in hdu.header
def test_table_writeto_header(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5988
"""
data = np.zeros((5,), dtype=[("x", float), ("y", int)])
h_in = fits.Header()
h_in["ANSWER"] = (42.0, "LTU&E")
filename = self.temp("tabhdr42.fits")
fits.writeto(filename, data=data, header=h_in, overwrite=True)
h_out = fits.getheader(filename, ext=1)
assert h_out["ANSWER"] == 42
def test_image_extension_update_header(self, home_is_temp):
"""
Test that _makehdu correctly includes the header. For example in the
fits.update convenience function.
"""
filename = self.temp("twoextension.fits")
hdus = [fits.PrimaryHDU(np.zeros((10, 10))), fits.ImageHDU(np.zeros((10, 10)))]
# Try to update a non-existent file
with pytest.raises(FileNotFoundError, match="No such file"):
fits.update(
filename, np.zeros((10, 10)), header=fits.Header([("WHAT", 100)]), ext=1
)
fits.HDUList(hdus).writeto(filename)
fits.update(
filename, np.zeros((10, 10)), header=fits.Header([("WHAT", 100)]), ext=1
)
h_out = fits.getheader(filename, ext=1)
assert h_out["WHAT"] == 100
def test_printdiff(self):
"""
Test that FITSDiff can run the different inputs without crashing.
"""
# Testing different string input options
assert printdiff(self.data("arange.fits"), self.data("blank.fits")) is None
assert (
printdiff(self.data("arange.fits"), self.data("blank.fits"), ext=0) is None
)
assert (
printdiff(
self.data("o4sp040b0_raw.fits"),
self.data("o4sp040b0_raw.fits"),
extname="sci",
)
is None
)
# This may seem weird, but check printdiff to see why; we need to test
# an incorrect second file
with pytest.raises(OSError):
printdiff("o4sp040b0_raw.fits", "fakefile.fits", extname="sci")
# Test HDU object inputs
with fits.open(self.data("stddata.fits"), mode="readonly") as in1:
with fits.open(self.data("checksum.fits"), mode="readonly") as in2:
assert printdiff(in1[0], in2[0]) is None
with pytest.raises(ValueError):
printdiff(in1[0], in2[0], ext=0)
assert printdiff(in1, in2) is None
with pytest.raises(NotImplementedError):
printdiff(in1, in2, 0)
def test_tabledump(self):
"""
A simple test of the dump method.
Also regression test for https://github.com/astropy/astropy/issues/6937
"""
datastr = (
'" 1" "abc" " 3.70000007152557" " 0"\n'
'" 2" "xy " " 6.69999971389771" " 1"\n'
)
cdstr = (
'c1 1J I11 "" ""'
' -2147483647 "" "" \n'
'c2 3A A3 "" ""'
' "" "" "" \n'
'c3 1E G15.7 "" ""'
' "" 3 0.4 \n'
'c4 1L L6 "" ""'
' "" "" "" \n'
)
# copy fits file to the temp directory
self.copy_file("tb.fits")
# test without datafile
fits.tabledump(self.temp("tb.fits"))
assert os.path.isfile(self.temp("tb_1.txt"))
# test with datafile
fits.tabledump(self.temp("tb.fits"), datafile=self.temp("test_tb.txt"))
assert os.path.isfile(self.temp("test_tb.txt"))
# test with datafile and cdfile
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
fits.tabledump(self.temp("tb.fits"), datafile, cdfile)
assert os.path.isfile(datafile)
with open(datafile) as data:
assert data.read() == datastr
with open(cdfile) as coldefs:
assert coldefs.read() == cdstr
@pytest.mark.parametrize("tablename", ["table.fits", "tb.fits"])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
# copy fits file to the temp directory
self.copy_file(tablename)
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
fits.tabledump(self.temp(tablename), datafile, cdfile, hfile)
new_tbhdu = fits.tableload(datafile, cdfile, hfile)
with fits.open(self.temp(tablename)) as hdul:
_assert_attr_col(new_tbhdu, hdul[1])
def test_append_filename(self, home_is_temp):
"""
Test fits.append with a filename argument.
"""
data = np.arange(6)
testfile = self.temp("test_append_1.fits")
# Test case 1: creation of file
fits.append(testfile, data=data, checksum=True)
# Test case 2: append to existing file, with verify=True
# Also test that additional keyword can be passed to fitsopen
fits.append(testfile, data=data * 2, checksum=True, ignore_blank=True)
# Test case 3: append to existing file, with verify=False
fits.append(testfile, data=data * 3, checksum=True, verify=False)
with fits.open(testfile, checksum=True) as hdu1:
np.testing.assert_array_equal(hdu1[0].data, data)
np.testing.assert_array_equal(hdu1[1].data, data * 2)
np.testing.assert_array_equal(hdu1[2].data, data * 3)
@pytest.mark.parametrize("mode", ["wb", "wb+", "ab", "ab+"])
def test_append_filehandle(self, tmp_path, mode):
"""
Test fits.append with a file handle argument.
"""
append_file = tmp_path / "append.fits"
with append_file.open(mode) as handle:
fits.append(filename=handle, data=np.ones((4, 4)))
def test_append_with_header(self):
"""
Test fits.append with a fits Header, which triggers detection of the
HDU class. Regression test for
https://github.com/astropy/astropy/issues/8660
"""
testfile = self.temp("test_append_1.fits")
with fits.open(self.data("test0.fits")) as hdus:
for hdu in hdus:
fits.append(testfile, hdu.data, hdu.header, checksum=True)
with fits.open(testfile, checksum=True) as hdus:
assert len(hdus) == 5
def test_pathlib(self):
testfile = pathlib.Path(self.temp("test.fits"))
data = np.arange(10)
hdulist = fits.HDUList([fits.PrimaryHDU(data)])
hdulist.writeto(testfile)
with fits.open(testfile) as hdul:
np.testing.assert_array_equal(hdul[0].data, data)
def test_getdata_ext_given(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=2 * np.ones((5, 5), dtype=int))
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
for ext in [0, 1, 2]:
buf.seek(0)
data = fits.getdata(buf, ext=ext)
assert data[0, 0] == ext
def test_getdata_ext_given_nodata(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(IndexError, match="No data in HDU #2."):
fits.getdata(buf, ext=2)
def test_getdata_ext_not_given_with_data_in_primary(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=None)
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
data = fits.getdata(buf)
assert data[0, 0] == 0
def test_getdata_ext_not_given_with_data_in_ext(self):
# tests fallback mechanism
prihdu = fits.PrimaryHDU(data=None)
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
data = fits.getdata(buf)
assert data[0, 0] == 1
def test_getdata_ext_not_given_nodata_any(self):
# tests exception raised when there is no data in either
# Primary HDU or first extension HDU
prihdu = fits.PrimaryHDU(data=None)
exthdu1 = fits.ImageHDU(data=None)
exthdu2 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(
IndexError, match="No data in either Primary or first extension HDUs."
):
fits.getdata(buf)
def test_getdata_ext_not_given_nodata_noext(self):
# tests exception raised when there is no data in the
# Primary HDU and there are no extension HDUs
prihdu = fits.PrimaryHDU(data=None)
hdulist = fits.HDUList([prihdu])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(
IndexError, match="No data in Primary HDU and no extension HDU found."
):
fits.getdata(buf)
|
faceb98eb4a2a3ebb26b3c322f667b0429b6a604830dbbdb7c8726609d5c2c52 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some methods related to ``CDS`` format
reader/writer.
Requires `pyyaml <https://pyyaml.org/>`_ to be installed.
"""
from io import StringIO
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import ascii
from astropy.table import Column, MaskedColumn, Table
from astropy.time import Time
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyWarning
from .common import assert_almost_equal
test_dat = [
"names e d s i",
"HD81809 1E-7 22.25608 +2 67",
"HD103095 -31.6e5 +27.2500 -9E34 -30",
]
def test_roundtrip_mrt_table():
"""
Tests whether the CDS writer can roundtrip a table, i.e. read a table
into a ``Table`` object and write it back out unchanged. Since the CDS
writer presently uses an MRT format template, only the Byte-By-Byte and
data sections of the table can be compared between the original and the
newly written table.
Further, the CDS reader cannot recognize column formats from the header
of a CDS/MRT table, so this test only works for a limited set of simple
tables that have no whitespace in the column values and no mix-in
columns. Because of this, the written table output cannot be matched
directly against the original file and has to be checked against a list
of lines.
Masked columns are read properly, though, and are therefore tested
during round-tripping.
The difference between ``cdsFunctional2.dat`` file and ``exp_output``
is the following:
* Metadata is different because MRT template is used for writing.
* Spacing between ``Label`` and ``Explanations`` column in the
Byte-By-Byte.
* Units are written as ``[cm.s-2]`` and not ``[cm/s2]``, since both
are valid according to CDS/MRT standard.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 7 A7 --- ID Star ID ",
" 9-12 I4 K Teff [4337/4654] Effective temperature ",
"14-17 F4.2 [cm.s-2] logg [0.77/1.28] Surface gravity ",
"19-22 F4.2 km.s-1 vturb [1.23/1.82] Micro-turbulence velocity",
"24-28 F5.2 [-] [Fe/H] [-2.11/-1.5] Metallicity ",
"30-33 F4.2 [-] e_[Fe/H] ? rms uncertainty on [Fe/H] ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"S05-5 4337 0.77 1.80 -2.07 ",
"S08-229 4625 1.23 1.23 -1.50 ",
"S05-10 4342 0.91 1.82 -2.11 0.14",
"S05-47 4654 1.28 1.74 -1.64 0.16",
]
dat = get_pkg_data_filename(
"data/cdsFunctional2.dat", package="astropy.io.ascii.tests"
)
t = Table.read(dat, format="ascii.mrt")
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
assert lines == exp_output
def test_write_byte_by_byte_units():
t = ascii.read(test_dat)
col_units = [None, u.C, u.kg, u.m / u.s, u.year]
t._set_column_attribute("unit", col_units)
# Add a column with magnitude units.
# Note that magnitude has to be assigned for each value explicitly.
t["magnitude"] = [u.Magnitude(25), u.Magnitude(-9)]
col_units.append(u.mag)
out = StringIO()
t.write(out, format="ascii.mrt")
# Read written table.
tRead = ascii.read(out.getvalue(), format="cds")
assert [tRead[col].unit for col in tRead.columns] == col_units
def test_write_readme_with_default_options():
exp_output = [
"Title:",
"Authors:",
"Table:",
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [-3160000.0/0.01] Description of e",
"16-23 F8.5 --- d [22.25/27.25] Description of d ",
"25-31 E7.1 --- s [-9e+34/2.0] Description of s ",
"33-35 I3 --- i [-30/67] Description of i ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 22.25608 2e+00 67",
"HD103095 -3e+06 27.25000 -9e+34 -30",
]
t = ascii.read(test_dat)
out = StringIO()
t.write(out, format="ascii.mrt")
assert out.getvalue().splitlines() == exp_output
def test_write_empty_table():
out = StringIO()
with pytest.raises(NotImplementedError):
Table().write(out, format="ascii.mrt")
def test_write_null_data_values():
exp_output = [
"HD81809 1e-07 22.25608 2.0e+00 67",
"HD103095 -3e+06 27.25000 -9.0e+34 -30",
"Sun 5.3e+27 ",
]
t = ascii.read(test_dat)
t.add_row(
["Sun", "3.25", "0", "5.3e27", "2"], mask=[False, True, True, False, True]
)
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
lines = lines[i_secs[-1] + 1 :] # Last section is the data.
assert lines == exp_output
def test_write_byte_by_byte_for_masked_column():
"""
This test differs from the ``test_write_null_data_values``
above in that it tests the column value limits in the Byte-By-Byte
description section for columns whose values are masked.
It also checks the description for columns with same values.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [0.0/0.01]? Description of e ",
"16-17 F2.0 --- d ? Description of d ",
"19-25 E7.1 --- s [-9e+34/2.0] Description of s ",
"27-29 I3 --- i [-30/67] Description of i ",
"31-33 F3.1 --- sameF [5.0/5.0] Description of sameF",
"35-36 I2 --- sameI [20] Description of sameI ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 2e+00 67 5.0 20",
"HD103095 -9e+34 -30 5.0 20",
]
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name="sameF")
t.add_column([20, 20], name="sameI")
t["e"] = MaskedColumn(t["e"], mask=[False, True])
t["d"] = MaskedColumn(t["d"], mask=[True, True])
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
assert lines == exp_output
exp_coord_cols_output = dict(
# fmt: off
generic=[
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'41-42 I2 --- sameI [20] Description of sameI ',
'44-45 I2 h RAh Right Ascension (hour) ',
'47-48 I2 min RAm Right Ascension (minute) ',
'50-62 F13.10 s RAs Right Ascension (second) ',
' 64 A1 --- DE- Sign of Declination ',
'65-66 I2 deg DEd Declination (degree) ',
'68-69 I2 arcmin DEm Declination (arcmin) ',
'71-82 F12.9 arcsec DEs Declination (arcsec) ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67 5.0 20 22 02 15.4500000000 -61 39 34.599996000',
'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 12 48 15.2244072000 +17 46 26.496624000',
],
positive_de=[
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'41-42 I2 --- sameI [20] Description of sameI ',
'44-45 I2 h RAh Right Ascension (hour) ',
'47-48 I2 min RAm Right Ascension (minute) ',
'50-62 F13.10 s RAs Right Ascension (second) ',
' 64 A1 --- DE- Sign of Declination ',
'65-66 I2 deg DEd Declination (degree) ',
'68-69 I2 arcmin DEm Declination (arcmin) ',
'71-82 F12.9 arcsec DEs Declination (arcsec) ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67 5.0 20 12 48 15.2244072000 +17 46 26.496624000',
'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 12 48 15.2244072000 +17 46 26.496624000',
],
# fmt: on
galactic=[
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [-3160000.0/0.01] Description of e",
"16-23 F8.5 --- d [22.25/27.25] Description of d ",
"25-31 E7.1 --- s [-9e+34/2.0] Description of s ",
"33-35 I3 --- i [-30/67] Description of i ",
"37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ",
"41-42 I2 --- sameI [20] Description of sameI ",
"44-59 F16.12 deg GLON Galactic Longitude ",
"61-76 F16.12 deg GLAT Galactic Latitude ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 22.25608 2e+00 67 5.0 20 330.071639591690 -45.548080484609",
"HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 330.071639591690 -45.548080484609",
],
ecliptic=[
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [-3160000.0/0.01] Description of e ",
"16-23 F8.5 --- d [22.25/27.25] Description of d ",
"25-31 E7.1 --- s [-9e+34/2.0] Description of s ",
"33-35 I3 --- i [-30/67] Description of i ",
"37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ",
"41-42 I2 --- sameI [20] Description of sameI ",
"44-59 F16.12 deg ELON Ecliptic Longitude (geocentrictrueecliptic)",
"61-76 F16.12 deg ELAT Ecliptic Latitude (geocentrictrueecliptic) ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 22.25608 2e+00 67 5.0 20 306.224208650096 -45.621789850825",
"HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 306.224208650096 -45.621789850825",
],
)
def test_write_coord_cols():
"""
There can only be one coordinate (``SkyCoord``) column in a single table,
because splitting it into individual component columns requires iterating
over the table columns, which would have to be done again if additional
coordinate columns were present.
"""
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name="sameF")
t.add_column([20, 20], name="sameI")
# Coordinates of ASASSN-15lh
coord = SkyCoord(330.564375, -61.65961111, unit=u.deg)
# Coordinates of ASASSN-14li
coordp = SkyCoord(192.06343503, 17.77402684, unit=u.deg)
cols = [
Column([coord, coordp]), # Generic coordinate column
coordp, # Coordinate column with positive DEC
coord.galactic, # Galactic coordinates
coord.geocentrictrueecliptic, # Ecliptic coordinates
]
# Loop through different types of coordinate columns.
for col, coord_type in zip(cols, exp_coord_cols_output):
exp_output = exp_coord_cols_output[coord_type]
t["coord"] = col
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
# Check the written table.
assert lines == exp_output
# Check if the original table columns remains unmodified.
assert t.colnames == ["names", "e", "d", "s", "i", "sameF", "sameI", "coord"]
def test_write_byte_by_byte_bytes_col_format():
"""
Tests the alignment of Byte counts with respect to the hyphen
in the Bytes column of the Byte-By-Byte section. The whitespace around the
hyphen is governed by the number of digits in the total Byte
count. Single Byte columns should have a single Byte count
without the hyphen.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-21 E12.6 --- e [-3160000.0/0.01] Description of e",
"23-30 F8.5 --- d [22.25/27.25] Description of d ",
"32-38 E7.1 --- s [-9e+34/2.0] Description of s ",
"40-42 I3 --- i [-30/67] Description of i ",
"44-46 F3.1 --- sameF [5.0/5.0] Description of sameF ",
"48-49 I2 --- sameI [20] Description of sameI ",
" 51 I1 --- singleByteCol [2] Description of singleByteCol ",
"53-54 I2 h RAh Right Ascension (hour) ",
"56-57 I2 min RAm Right Ascension (minute) ",
"59-71 F13.10 s RAs Right Ascension (second) ",
" 73 A1 --- DE- Sign of Declination ",
"74-75 I2 deg DEd Declination (degree) ",
"77-78 I2 arcmin DEm Declination (arcmin) ",
"80-91 F12.9 arcsec DEs Declination (arcsec) ",
"--------------------------------------------------------------------------------",
]
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name="sameF")
t.add_column([20, 20], name="sameI")
t["coord"] = SkyCoord(330.564375, -61.65961111, unit=u.deg)
t["singleByteCol"] = [2, 2]
t["e"].format = ".5E"
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
# Select only the Byte-By-Byte section.
lines = lines[i_secs[0] : i_secs[-2]]
lines.append("-" * 80) # Append a separator line.
assert lines == exp_output
def test_write_byte_by_byte_wrapping():
"""
Test line wrapping in the description column of the
Byte-By-Byte section of the ReadMe.
"""
exp_output = """\
================================================================================
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 8 A8 --- thisIsALongColumnLabel This is a tediously long
description. But they do sometimes
have them. Better to put extra
details in the notes. This is a
tediously long description. But they
do sometimes have them. Better to put
extra details in the notes.
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
--------------------------------------------------------------------------------
"""
t = ascii.read(test_dat)
t.remove_columns(["s", "i"])
description = (
"This is a tediously long description."
+ " But they do sometimes have them."
+ " Better to put extra details in the notes. "
)
t["names"].description = description * 2
t["names"].name = "thisIsALongColumnLabel"
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
# Select only the Byte-By-Byte section.
lines = lines[i_secs[0] : i_secs[-2]]
lines.append("-" * 80) # Append a separator line.
assert lines == exp_output.splitlines()
def test_write_mixin_and_broken_cols():
"""
Tests conversion to string values for ``mix-in`` columns other than
``SkyCoord`` and for columns with only partial ``SkyCoord`` values.
"""
# fmt: off
exp_output = [
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 7 A7 --- name Description of name ',
' 9- 74 A66 --- Unknown Description of Unknown',
' 76-114 A39 --- Unknown Description of Unknown',
'116-138 A23 --- Unknown Description of Unknown',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 <SkyCoord (ICRS): (ra, dec) in deg',
' (330.564375, -61.65961111)> (0.41342785, -0.23329341, -0.88014294) 2019-01-01 00:00:00.000',
'random 12 (0.41342785, -0.23329341, -0.88014294) 2019-01-01 00:00:00.000',
]
# fmt: on
t = Table()
t["name"] = ["HD81809"]
coord = SkyCoord(330.564375, -61.65961111, unit=u.deg)
t["coord"] = Column(coord)
t.add_row(["random", 12])
t["cart"] = coord.cartesian
t["time"] = Time("2019-1-1")
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
# Check the written table.
assert lines == exp_output
def test_write_extra_skycoord_cols():
"""
Tests output for cases when table contains multiple ``SkyCoord`` columns.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 7 A7 --- name Description of name ",
" 9-10 I2 h RAh Right Ascension (hour) ",
"12-13 I2 min RAm Right Ascension (minute)",
"15-27 F13.10 s RAs Right Ascension (second)",
" 29 A1 --- DE- Sign of Declination ",
"30-31 I2 deg DEd Declination (degree) ",
"33-34 I2 arcmin DEm Declination (arcmin) ",
"36-47 F12.9 arcsec DEs Declination (arcsec) ",
"49-62 A14 --- coord2 Description of coord2 ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD4760 0 49 39.9000000000 +06 24 07.999200000 12.4163 6.407 ",
"HD81809 22 02 15.4500000000 -61 39 34.599996000 330.564 -61.66",
]
t = Table()
t["name"] = ["HD4760", "HD81809"]
t["coord1"] = SkyCoord([12.41625, 330.564375], [6.402222, -61.65961111], unit=u.deg)
t["coord2"] = SkyCoord([12.41630, 330.564400], [6.407, -61.66], unit=u.deg)
out = StringIO()
with pytest.warns(
UserWarning,
match=r"column 2 is being skipped with designation of a "
r"string valued column `coord2`",
):
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and following lines.
# Check the written table.
assert lines[:-2] == exp_output[:-2]
for a, b in zip(lines[-2:], exp_output[-2:]):
assert a[:18] == b[:18]
assert a[30:42] == b[30:42]
assert_almost_equal(
np.fromstring(a[2:], sep=" "), np.fromstring(b[2:], sep=" ")
)
def test_write_skycoord_with_format():
"""
Tests output with custom setting for ``SkyCoord`` (second) columns.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 7 A7 --- name Description of name ",
" 9-10 I2 h RAh Right Ascension (hour) ",
"12-13 I2 min RAm Right Ascension (minute)",
"15-19 F5.2 s RAs Right Ascension (second)",
" 21 A1 --- DE- Sign of Declination ",
"22-23 I2 deg DEd Declination (degree) ",
"25-26 I2 arcmin DEm Declination (arcmin) ",
"28-31 F4.1 arcsec DEs Declination (arcsec) ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD4760 0 49 39.90 +06 24 08.0",
"HD81809 22 02 15.45 -61 39 34.6",
]
t = Table()
t["name"] = ["HD4760", "HD81809"]
t["coord"] = SkyCoord([12.41625, 330.564375], [6.402222, -61.65961111], unit=u.deg)
out = StringIO()
# This will raise a warning because `formats` is checked before the writer
# that creates the final list of columns is called.
with pytest.warns(
AstropyWarning,
match=r"The key.s. {'[RD][AE]s', '[RD][AE]s'} specified in "
r"the formats argument do not match a column name.",
):
t.write(out, format="ascii.mrt", formats={"RAs": "05.2f", "DEs": "04.1f"})
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and following lines.
# Check the written table.
assert lines == exp_output
|
c841765a445a696d2afda8ada58cf9641a2c6d8f8148b32d542a00a4701632ff | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.io.misc import fnpickle, fnunpickle
def test_fnpickling_simple(tmp_path):
"""
Tests the `fnpickle` and `fnunpickle` functions' basic operation by
pickling and unpickling a string, using both a filename and a
file.
"""
fn = str(tmp_path / "test1.pickle")
obj1 = "astring"
fnpickle(obj1, fn)
res = fnunpickle(fn, 0)
assert obj1 == res
# now try with a file-like object instead of a string
with open(fn, "wb") as f:
fnpickle(obj1, f)
with open(fn, "rb") as f:
res = fnunpickle(f)
assert obj1 == res
class ToBePickled:
def __init__(self, item):
self.item = item
def __eq__(self, other):
if isinstance(other, ToBePickled):
return self.item == other.item
else:
return False
def test_fnpickling_class(tmp_path):
"""
Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
and unpickle custom classes.
"""
fn = str(tmp_path / "test2.pickle")
obj1 = "astring"
obj2 = ToBePickled(obj1)
fnpickle(obj2, fn)
res = fnunpickle(fn)
assert res == obj2
def test_fnpickling_protocol(tmp_path):
"""
Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
and unpickle pickle files from all protocols.
"""
import pickle
obj1 = "astring"
obj2 = ToBePickled(obj1)
for p in range(pickle.HIGHEST_PROTOCOL + 1):
fn = str(tmp_path / f"testp{p}.pickle")
fnpickle(obj2, fn, protocol=p)
res = fnunpickle(fn)
assert res == obj2
def test_fnpickling_many(tmp_path):
"""
Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
and unpickle multiple objects from a single file.
"""
fn = str(tmp_path / "test3.pickle")
# now try multiples
obj3 = 328.3432
obj4 = "blahblahfoo"
fnpickle(obj3, fn)
fnpickle(obj4, fn, append=True)
res = fnunpickle(fn, number=-1)
assert len(res) == 2
assert res[0] == obj3
assert res[1] == obj4
fnpickle(obj4, fn, append=True)
res = fnunpickle(fn, number=2)
assert len(res) == 2
with pytest.raises(EOFError):
fnunpickle(fn, number=5)
|
80a036c40a5e62bb476d51dd2de6ee3c7e2eb80aef791e81f5b68185a8561769 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Comparison functions for `astropy.cosmology.Cosmology`.
This module is **NOT** public API. To use these functions, import them from
the top-level namespace -- :mod:`astropy.cosmology`.
This module will be moved.
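For example (a usage sketch; ``cosmology_equal`` is the comparison
function expected to be re-exported from the top-level namespace):
>>> from astropy.cosmology import Planck18, cosmology_equal  # doctest: +SKIP
>>> cosmology_equal(Planck18, Planck18)  # doctest: +SKIP
True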
"""
from __future__ import annotations
import functools
import inspect
from typing import Any, Callable, Tuple, Union
import numpy as np
from numpy import False_, True_, ndarray
from astropy import table
from astropy.cosmology.core import Cosmology
__all__ = [] # Nothing is scoped here
##############################################################################
# PARAMETERS
_FormatType = Union[bool, None, str]
_FormatsT = Union[_FormatType, Tuple[_FormatType, ...]]
_CompFnT = Callable[[Any, _FormatType], Cosmology]
_COSMO_AOK: set[Any] = {None, True_, False_, "astropy.cosmology"}
# The numpy bool also catches real bool for ops "==" and "in"
##############################################################################
# UTILITIES
class _CosmologyWrapper:
"""
A private wrapper class to hide things from :mod:`numpy`.
This should never be exposed to the user.
"""
__slots__ = ("wrapped",)
# Use less memory and speed up initialization.
_cantbroadcast: tuple[type, ...] = (table.Row, table.Table)
"""
Have to deal with things that do not broadcast well, e.g.
`~astropy.table.Row`, which cannot be used in an array even if ``dtype=object``
and will cause a segfault when used in a `numpy.ufunc`.
"""
wrapped: Any
def __init__(self, wrapped: Any) -> None:
self.wrapped = wrapped
# TODO! when py3.9+ use @functools.partial(np.frompyfunc, nin=2, nout=1)
# TODO! https://github.com/numpy/numpy/issues/9477 segfaults on astropy.row
# and np.vectorize can't coerce table to dtypes
def _wrap_to_ufunc(nin: int, nout: int) -> Callable[[_CompFnT], np.ufunc]:
def wrapper(pyfunc: _CompFnT) -> np.ufunc:
ufunc = np.frompyfunc(pyfunc, nin, nout)
return ufunc
return wrapper
@_wrap_to_ufunc(2, 1)
def _parse_format(cosmo: Any, format: _FormatType, /) -> Cosmology:
"""Parse Cosmology-like input into Cosmologies, given a format hint.
Parameters
----------
cosmo : |Cosmology|-like, positional-only
|Cosmology| to parse.
format : bool or None or str, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting.
Returns
-------
|Cosmology| or generator thereof
Raises
------
TypeError
If ``cosmo`` is not a |Cosmology| and ``format`` equals `False`.
ValueError
If ``cosmo`` is a |Cosmology| and ``format`` is not `None`, a bool, or
``"astropy.cosmology"``.
"""
# Deal with private wrapper
if isinstance(cosmo, _CosmologyWrapper):
cosmo = cosmo.wrapped
# Shortcut if already a cosmology
if isinstance(cosmo, Cosmology):
if format not in _COSMO_AOK:
allowed = "/".join(map(str, _COSMO_AOK))
raise ValueError(
f"for parsing a Cosmology, 'format' must be {allowed}, not {format}"
)
return cosmo
# Convert, if allowed.
elif format == False_: # catches False and False_
raise TypeError(
f"if 'format' is False, arguments must be a Cosmology, not {cosmo}"
)
else:
format = None if format == True_ else format # str->str, None/True/True_->None
out = Cosmology.from_format(cosmo, format=format) # this can error!
return out
def _parse_formats(*cosmos: object, format: _FormatsT) -> ndarray:
"""Parse Cosmology-like to |Cosmology|, using provided formats.
``format`` is broadcast to match the shape of the cosmology arguments. Note
that the cosmology arguments are not broadcast against ``format``, so it
cannot determine the output shape.
Parameters
----------
*cosmos : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by the corresponding ``format``.
format : bool or None or str or array-like thereof, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting. Note ``format`` is broadcast as an object array to match the
shape of ``cosmos`` so ``format`` cannot determine the output shape.
Raises
------
TypeError
If any in ``cosmos`` is not a |Cosmology| and the corresponding
``format`` equals `False`.
"""
formats = np.broadcast_to(np.array(format, dtype=object), len(cosmos))
# parse each cosmo & format
# Have to deal with things that do not broadcast well.
# astropy.row cannot be used in an array, even if dtype=object
# and will raise a segfault when used in a ufunc.
towrap = (isinstance(cosmo, _CosmologyWrapper._cantbroadcast) for cosmo in cosmos)
wcosmos = [
c if not wrap else _CosmologyWrapper(c) for c, wrap in zip(cosmos, towrap)
]
return _parse_format(wcosmos, formats)
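# Illustrative usage sketch -- not part of astropy. It shows what
# ``_parse_formats`` returns for a mix of an actual |Cosmology| and a
# mapping-serialized one. ``Planck18`` is assumed importable from
# ``astropy.cosmology``; the code is wrapped in a function so nothing runs at
# import time.
def _example_parse_formats():
    from astropy.cosmology import Planck18
    mapping = Planck18.to_format("mapping")
    # The Cosmology passes through untouched (conversion disallowed for it),
    # while the mapping is converted because its format hint allows it.
    parsed = _parse_formats(Planck18, mapping, format=[False, True])
    assert parsed.shape == (2,)
    assert all(cosmo == Planck18 for cosmo in parsed)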
def _comparison_decorator(pyfunc: Callable[..., Any]) -> Callable[..., Any]:
"""Decorator to make wrapper function that parses |Cosmology|-like inputs.
Parameters
----------
pyfunc : Python function object
An arbitrary Python function.
Returns
-------
callable[..., Any]
Wrapped `pyfunc`, as described above.
Notes
-----
All decorated functions should add the following to 'Parameters'.
format : bool or None or str or array-like thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
"""
sig = inspect.signature(pyfunc)
    # Count the positional-only parameters, i.e. the cosmologies to compare.
    nin = sum(p.kind == 0 for p in sig.parameters.values())
# Make wrapper function that parses cosmology-like inputs
@functools.wraps(pyfunc)
def wrapper(*cosmos: Any, format: _FormatsT = False, **kwargs: Any) -> bool:
if len(cosmos) > nin:
raise TypeError(
f"{wrapper.__wrapped__.__name__} takes {nin} positional"
f" arguments but {len(cosmos)} were given"
)
# Parse cosmologies to format. Only do specified number.
cosmos = _parse_formats(*cosmos, format=format)
# Evaluate pyfunc, erroring if didn't match specified number.
result = wrapper.__wrapped__(*cosmos, **kwargs)
        # Return the result.
return result
return wrapper
##############################################################################
# COMPARISON FUNCTIONS
@_comparison_decorator
def cosmology_equal(
cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool = False
) -> bool:
r"""Return element-wise equality check on the cosmologies.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
Examples
--------
Assuming the following imports
>>> import astropy.units as u
>>> from astropy.cosmology import FlatLambdaCDM
Two identical cosmologies are equal.
>>> cosmo1 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmology_equal(cosmo1, cosmo2)
True
And cosmologies with different parameters are not.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.4)
>>> cosmology_equal(cosmo1, cosmo3)
False
Two cosmologies may be equivalent even if not of the same class. In these
examples the |LambdaCDM| has :attr:`~astropy.cosmology.LambdaCDM.Ode0` set
to the same value calculated in |FlatLambdaCDM|.
>>> from astropy.cosmology import LambdaCDM
>>> cosmo3 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmology_equal(cosmo1, cosmo3)
False
>>> cosmology_equal(cosmo1, cosmo3, allow_equivalent=True)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo4 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmology_equal(cosmo3, cosmo4, allow_equivalent=True)
False
Also, using the keyword argument, the notion of equality is extended to any
Python object that can be converted to a |Cosmology|.
>>> mapping = cosmo2.to_format("mapping")
>>> cosmology_equal(cosmo1, mapping, format=True)
True
Either (or both) arguments can be |Cosmology|-like.
>>> cosmology_equal(mapping, cosmo2, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be checked
with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of these
kinds can still be checked for equality, but the correct format string must
be used.
>>> yml = cosmo2.to_format("yaml")
>>> cosmology_equal(cosmo1, yml, format=(None, "yaml"))
True
This also works with an array of ``format`` matching the number of
cosmologies.
>>> cosmology_equal(mapping, yml, format=[True, "yaml"])
True
"""
# Check parameter equality
if not allow_equivalent:
eq = cosmo1 == cosmo2
else:
# Check parameter equivalence
# The options are: 1) same class & parameters; 2) same class, different
# parameters; 3) different classes, equivalent parameters; 4) different
# classes, different parameters. (1) & (3) => True, (2) & (4) => False.
eq = cosmo1.__equiv__(cosmo2)
if eq is NotImplemented:
eq = cosmo2.__equiv__(cosmo1) # that failed, try from 'other'
eq = False if eq is NotImplemented else eq
# TODO! include equality check of metadata
return eq
@_comparison_decorator
def _cosmology_not_equal(
cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool = False
) -> bool:
r"""Return element-wise cosmology non-equality check.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. ``format`` is
broadcast to match the shape of the cosmology arguments. Note that the
cosmology arguments are not broadcast against ``format``, so it cannot
determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
See Also
--------
astropy.cosmology.cosmology_equal
Element-wise equality check, with argument conversion to Cosmology.
"""
neq = not cosmology_equal(cosmo1, cosmo2, allow_equivalent=allow_equivalent)
# TODO! it might eventually be worth the speed boost to implement some of
# the internals of cosmology_equal here, but for now it's a hassle.
return neq
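# Illustrative usage sketch -- not part of astropy. The private helper above
# simply negates ``cosmology_equal`` and accepts the same ``format`` hints.
# ``Planck13`` and ``Planck18`` are assumed importable; wrapped in a function
# so nothing runs at import time.
def _example_cosmology_not_equal():
    from astropy.cosmology import Planck13, Planck18
    assert _cosmology_not_equal(Planck18, Planck13)
    mapping = Planck18.to_format("mapping")
    assert not _cosmology_not_equal(Planck18, mapping, format=True)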
|
7b0853906c434a01bb824b1c252ddc90f1dbcdbb4211e13e4725565141d7bfc8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for `astropy.cosmology`."""
from .comparison import cosmology_equal
# _z_at_scalar_value is imported for backwards compatibility
from .optimize import _z_at_scalar_value, z_at_value
__all__ = ["z_at_value", "cosmology_equal"]
|
4beef28d0603e661a91908f88c6949eab6aea12de84a16d664bc6df02a78c058 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.modeling import Parameter as ModelParameter
from astropy.table import Column
FULLQUALNAME_SUBSTITUTIONS = {
"astropy.cosmology.flrw.base.FLRW": "astropy.cosmology.flrw.FLRW",
"astropy.cosmology.flrw.lambdacdm.LambdaCDM": "astropy.cosmology.flrw.LambdaCDM",
"astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM": (
"astropy.cosmology.flrw.FlatLambdaCDM"
),
"astropy.cosmology.flrw.w0wacdm.w0waCDM": "astropy.cosmology.flrw.w0waCDM",
"astropy.cosmology.flrw.w0wacdm.Flatw0waCDM": "astropy.cosmology.flrw.Flatw0waCDM",
"astropy.cosmology.flrw.w0wzcdm.w0wzCDM": "astropy.cosmology.flrw.w0wzCDM",
"astropy.cosmology.flrw.w0cdm.wCDM": "astropy.cosmology.flrw.wCDM",
"astropy.cosmology.flrw.w0cdm.FlatwCDM": "astropy.cosmology.flrw.FlatwCDM",
"astropy.cosmology.flrw.wpwazpcdm.wpwaCDM": "astropy.cosmology.flrw.wpwaCDM",
}
"""Substitutions mapping the actual qualified name to its preferred value."""
def convert_parameter_to_column(parameter, value, meta=None):
"""Convert a |Cosmology| Parameter to a Table |Column|.
Parameters
----------
parameter : `astropy.cosmology.parameter.Parameter`
value : Any
meta : dict or None, optional
Information from the Cosmology's metadata.
Returns
-------
`astropy.table.Column`
"""
shape = (1,) + np.shape(value) # minimum of 1d
col = Column(
data=np.reshape(value, shape),
name=parameter.name,
dtype=None, # inferred from the data
description=parameter.__doc__,
format=None,
meta=meta,
)
return col
def convert_parameter_to_model_parameter(parameter, value, meta=None):
"""Convert a Cosmology Parameter to a Model Parameter.
Parameters
----------
parameter : `astropy.cosmology.parameter.Parameter`
value : Any
meta : dict or None, optional
Information from the Cosmology's metadata.
This function will use any of: 'getter', 'setter', 'fixed', 'tied',
'min', 'max', 'bounds', 'prior', 'posterior'.
Returns
-------
`astropy.modeling.Parameter`
"""
# Get from meta information relevant to Model
attrs = (
"getter",
"setter",
"fixed",
"tied",
"min",
"max",
"bounds",
"prior",
"posterior",
)
extra = {k: v for k, v in (meta or {}).items() if k in attrs}
return ModelParameter(
description=parameter.__doc__,
default=value,
unit=getattr(value, "unit", None),
**extra
)
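# Illustrative usage sketch -- not part of astropy. It converts a single
# Cosmology Parameter both to a table Column and to a modeling Parameter.
# It assumes ``Planck18`` is importable and that accessing a Parameter on the
# *class* returns the descriptor itself; wrapped in a function so nothing runs
# at import time.
def _example_convert_h0():
    from astropy.cosmology import Planck18
    parameter = type(Planck18).H0  # the Parameter descriptor
    value = Planck18.H0  # the bound value (a Quantity)
    col = convert_parameter_to_column(parameter, value, meta={"note": "example"})
    # -> a length-1 Column named "H0" described by the Parameter docstring
    model_param = convert_parameter_to_model_parameter(parameter, value)
    # -> an astropy.modeling Parameter with the same default value and unit
    return col, model_param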
|
25238d863d5bc1c7194bbd9d8cad9779fad8a13f1c012237f6399787e30941d5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import json
import os
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import units as cu
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
def read_json(filename, **kwargs):
"""Read JSON.
Parameters
----------
filename : str
**kwargs
Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format`
Returns
-------
`~astropy.cosmology.Cosmology` instance
"""
# read
if isinstance(filename, (str, bytes, os.PathLike)):
with open(filename) as file:
data = file.read()
else: # file-like : this also handles errors in dumping
data = filename.read()
mapping = json.loads(data) # parse json mappable to dict
# deserialize Quantity
with u.add_enabled_units(cu.redshift):
for k, v in mapping.items():
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping[k] = u.Quantity(v["value"], v["unit"])
for k, v in mapping.get("meta", {}).items(): # also the metadata
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping["meta"][k] = u.Quantity(v["value"], v["unit"])
return Cosmology.from_format(mapping, format="mapping", **kwargs)
def write_json(cosmology, file, *, overwrite=False):
"""Write Cosmology to JSON.
Parameters
----------
cosmology : `astropy.cosmology.Cosmology` subclass instance
file : path-like or file-like
overwrite : bool (optional, keyword-only)
"""
data = cosmology.to_format("mapping") # start by turning into dict
data["cosmology"] = data["cosmology"].__qualname__
# serialize Quantity
for k, v in data.items():
if isinstance(v, u.Quantity):
data[k] = {"value": v.value.tolist(), "unit": str(v.unit)}
for k, v in data.get("meta", {}).items(): # also serialize the metadata
if isinstance(v, u.Quantity):
data["meta"][k] = {"value": v.value.tolist(), "unit": str(v.unit)}
# check that file exists and whether to overwrite.
if os.path.exists(file) and not overwrite:
raise OSError(f"{file} exists. Set 'overwrite' to write over.")
with open(file, "w") as write_file:
json.dump(data, write_file)
def json_identify(origin, filepath, fileobj, *args, **kwargs):
return filepath is not None and filepath.endswith(".json")
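# Illustrative usage sketch -- not part of astropy. It wires the helpers above
# into the Cosmology read/write registry and round-trips ``Planck18``, much as
# the fixture below does. ``tmp_path`` is a hypothetical ``pathlib.Path``
# supplied by the caller; wrapped in a function so nothing runs at import time.
def _example_json_roundtrip(tmp_path):
    from astropy.cosmology import Planck18
    readwrite_registry.register_reader("json", Cosmology, read_json, force=True)
    readwrite_registry.register_writer("json", Cosmology, write_json, force=True)
    readwrite_registry.register_identifier("json", Cosmology, json_identify, force=True)
    try:
        fp = tmp_path / "example.json"
        Planck18.write(fp, format="json")
        # Reading back should reproduce an equal cosmology (metadata aside).
        assert Cosmology.read(fp, format="json") == Planck18
    finally:
        readwrite_registry.unregister_reader("json", Cosmology)
        readwrite_registry.unregister_writer("json", Cosmology)
        readwrite_registry.unregister_identifier("json", Cosmology)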
###############################################################################
class ReadWriteJSONTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="json"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.fixture(scope="class", autouse=True)
def register_and_unregister_json(self):
"""Setup & teardown for JSON read/write tests."""
# Register
readwrite_registry.register_reader("json", Cosmology, read_json, force=True)
readwrite_registry.register_writer("json", Cosmology, write_json, force=True)
readwrite_registry.register_identifier(
"json", Cosmology, json_identify, force=True
)
yield # Run all tests in class
# Unregister
readwrite_registry.unregister_reader("json", Cosmology)
readwrite_registry.unregister_writer("json", Cosmology)
readwrite_registry.unregister_identifier("json", Cosmology)
# ========================================================================
def test_readwrite_json_subclass_partial_info(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_readwrite_json_subclass_partial_info.json"
# test write
cosmo.write(fp, format="json")
# partial information
with open(fp) as file:
L = file.readlines()[0]
L = (
L[: L.index('"cosmology":')] + L[L.index(", ") + 2 :]
) # remove cosmology : #203
i = L.index('"Tcmb0":') # delete Tcmb0
L = (
L[:i] + L[L.index(", ", L.index(", ", i) + 1) + 2 :]
) # second occurrence : #203
tempfname = tmp_path / f"{cosmo.name}_temp.json"
with open(tempfname, "w") as file:
file.writelines([L])
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(tempfname, format="json")
got2 = read(tempfname, format="json", cosmology=cosmo_cls)
got3 = read(tempfname, format="json", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
class TestReadWriteJSON(ReadWriteDirectTestBase, ReadWriteJSONTestMixin):
"""
Directly test ``read/write_json``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="json")``, but should be
    tested regardless because they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_json, "write": write_json}
|
42c8cbd20b2da41ce7f38f905824b6ad92b2696b15570b0fee3cd4c7eebe1a09 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
import sys
from pathlib import Path
from .version import version as __version__
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
online_docs_root = "https://docs.astropy.org/en/{}/".format(
"latest" if "dev" in __version__ else f"v{__version__}"
)
from . import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
"When True, use Unicode characters when outputting values, and "
"displaying widgets at the console.",
)
use_color = _config.ConfigItem(
sys.platform != "win32",
"When True, use ANSI color escape sequences when writing to the console.",
aliases=["astropy.utils.console.USE_COLOR", "astropy.logger.USE_COLOR"],
)
max_lines = _config.ConfigItem(
None,
description=(
"Maximum number of lines in the display of pretty-printed "
"objects. If not provided, try to determine automatically from the "
"terminal size. Negative numbers mean no limit."
),
cfgtype="integer(default=None)",
aliases=["astropy.table.pprint.max_lines"],
)
max_width = _config.ConfigItem(
None,
description=(
"Maximum number of characters per line in the display of "
"pretty-printed objects. If not provided, try to determine "
"automatically from the terminal size. Negative numbers mean no "
"limit."
),
cfgtype="integer(default=None)",
aliases=["astropy.table.pprint.max_width"],
)
conf = Conf()
# Define a base ScienceState for configuring constants and units
from .utils.state import ScienceState
class base_constants_version(ScienceState):
"""
Base class for the real version-setters below.
"""
_value = "test"
_versions = dict(test="test")
@classmethod
def validate(cls, value):
if value not in cls._versions:
raise ValueError(f"Must be one of {list(cls._versions.keys())}")
return cls._versions[value]
@classmethod
def set(cls, value):
"""
Set the current constants value.
"""
import sys
if "astropy.units" in sys.modules:
raise RuntimeError("astropy.units is already imported")
if "astropy.constants" in sys.modules:
raise RuntimeError("astropy.constants is already imported")
return super().set(value)
class physical_constants(base_constants_version):
"""
The version of physical constants to use.
"""
# Maintainers: update when new constants are added
_value = "codata2018"
_versions = dict(
codata2018="codata2018",
codata2014="codata2014",
codata2010="codata2010",
astropyconst40="codata2018",
astropyconst20="codata2014",
astropyconst13="codata2010",
)
class astronomical_constants(base_constants_version):
"""
The version of astronomical constants to use.
"""
# Maintainers: update when new constants are added
_value = "iau2015"
_versions = dict(
iau2015="iau2015",
iau2012="iau2012",
astropyconst40="iau2015",
astropyconst20="iau2015",
astropyconst13="iau2012",
)
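# Illustrative sketch -- not part of astropy. It shows selecting a constants
# version via the ScienceState classes above. The call is assumed to run
# *before* ``astropy.units`` or ``astropy.constants`` is imported, since
# ``set`` refuses to change versions afterwards; wrapped in a function so that
# nothing executes at import time.
def _example_select_constants_version():
    # Temporarily use the CODATA 2014 physical constants.
    with physical_constants.set("codata2014"):
        from astropy import constants as const
        return const.c  # drawn from the selected constants version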
# Create the test() function
from .tests.runner import TestRunner
test = TestRunner.make_test_runner_in(__path__[0])
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
try:
from .utils import _compiler # noqa: F401
except ImportError:
        # If this __init__.py file is in ./astropy/ then the import is happening
        # from within a source directory. ".astropy-root" is a file distributed
        # with the source, but it should not be installed.
if (Path(__file__).parent.parent / ".astropy-root").exists():
raise ImportError(
"You appear to be trying to import astropy from "
"within a source checkout or from an editable "
"installation without building the extension "
"modules first. Either run:\n\n"
" pip install -e .\n\nor\n\n"
" python setup.py build_ext --inplace\n\n"
"to make sure the extension modules are built "
) from None
# Outright broken installation, just raise standard error
raise
# Set the bibtex entry to the article referenced in CITATION.
def _get_bibtex():
refs = (Path(__file__).parent / "CITATION").read_text().split("@ARTICLE")[1:]
return f"@ARTICLE{refs[0]}" if refs else ""
__citation__ = __bibtex__ = _get_bibtex()
from .logger import _init_log, _teardown_log
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page # noqa: F401
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
import webbrowser
from urllib.parse import urlencode
url = online_docs_root + f"search.html?{urlencode({'q': query})}"
webbrowser.open(url)
__dir_inc__ = [
"__version__",
"__githash__",
"__bibtex__",
"test",
"log",
"find_api_page",
"online_help",
"online_docs_root",
"conf",
"physical_constants",
"astronomical_constants",
]
from types import ModuleType as __module_type__
# Clean up top-level namespace--delete everything that isn't in __dir_inc__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not (
(varname.startswith("__") and varname.endswith("__"))
or varname in __dir_inc__
or (
varname[0] != "_"
and isinstance(locals()[varname], __module_type__)
and locals()[varname].__name__.startswith(__name__ + ".")
)
):
        # The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
|
6a5f339183ce60df9b18a9cfd201262a9b5a06590eb69beb57c8e76cfe34f92b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains pytest configuration settings that are astropy-specific
(i.e. those that would not necessarily be shared by affiliated packages
making use of astropy's test runner).
"""
import builtins
import os
import sys
import tempfile
import warnings
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
except ImportError:
PYTEST_HEADER_MODULES = {}
TESTED_VERSIONS = {}
import pytest
from astropy import __version__
# This is needed to silence a warning from matplotlib caused by
# PyInstaller's matplotlib runtime hook. This can be removed once the
# issue is fixed upstream in PyInstaller, and only impacts us when running
# the tests from a PyInstaller bundle.
# See https://github.com/astropy/astropy/issues/10785
if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
# The above checks whether we are running in a PyInstaller bundle.
warnings.filterwarnings("ignore", "(?s).*MATPLOTLIBDATA.*", category=UserWarning)
# Note: while the filterwarnings is required, this import has to come after the
# filterwarnings above, because this attempts to import matplotlib:
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB
if HAS_MATPLOTLIB:
import matplotlib
matplotlibrc_cache = {}
@pytest.fixture
def ignore_matplotlibrc():
# This is a fixture for tests that use matplotlib but not pytest-mpl
# (which already handles rcParams)
from matplotlib import pyplot as plt
with plt.style.context({}, after_reset=True):
yield
@pytest.fixture
def fast_thread_switching():
"""Fixture that reduces thread switching interval.
This makes it easier to provoke race conditions.
"""
old = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
yield
sys.setswitchinterval(old)
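# Illustrative sketch -- not part of astropy's test suite. A hypothetical test
# opts into the fixture above simply by naming it as an argument; the body
# below is only a toy thread exercise.
def _example_test_with_fast_thread_switching(fast_thread_switching):
    import threading
    results = []
    threads = [threading.Thread(target=results.append, args=(i,)) for i in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert sorted(results) == list(range(8))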
def pytest_configure(config):
from astropy.utils.iers import conf as iers_conf
# Disable IERS auto download for testing
iers_conf.auto_download = False
builtins._pytest_running = True
# do not assign to matplotlibrc_cache in function scope
if HAS_MATPLOTLIB:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
matplotlibrc_cache.update(matplotlib.rcParams)
matplotlib.rcdefaults()
matplotlib.use("Agg")
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration. Note that this
# is also set in the test runner, but we need to also set it here for
# things to work properly in parallel mode
builtins._xdg_config_home_orig = os.environ.get("XDG_CONFIG_HOME")
builtins._xdg_cache_home_orig = os.environ.get("XDG_CACHE_HOME")
os.environ["XDG_CONFIG_HOME"] = tempfile.mkdtemp("astropy_config")
os.environ["XDG_CACHE_HOME"] = tempfile.mkdtemp("astropy_cache")
os.mkdir(os.path.join(os.environ["XDG_CONFIG_HOME"], "astropy"))
os.mkdir(os.path.join(os.environ["XDG_CACHE_HOME"], "astropy"))
config.option.astropy_header = True
PYTEST_HEADER_MODULES["PyERFA"] = "erfa"
PYTEST_HEADER_MODULES["Cython"] = "cython"
PYTEST_HEADER_MODULES["Scikit-image"] = "skimage"
PYTEST_HEADER_MODULES["asdf"] = "asdf"
TESTED_VERSIONS["Astropy"] = __version__
def pytest_unconfigure(config):
from astropy.utils.iers import conf as iers_conf
# Undo IERS auto download setting for testing
iers_conf.reset("auto_download")
builtins._pytest_running = False
# do not assign to matplotlibrc_cache in function scope
if HAS_MATPLOTLIB:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
matplotlib.rcParams.update(matplotlibrc_cache)
matplotlibrc_cache.clear()
if builtins._xdg_config_home_orig is None:
os.environ.pop("XDG_CONFIG_HOME")
else:
os.environ["XDG_CONFIG_HOME"] = builtins._xdg_config_home_orig
if builtins._xdg_cache_home_orig is None:
os.environ.pop("XDG_CACHE_HOME")
else:
os.environ["XDG_CACHE_HOME"] = builtins._xdg_cache_home_orig
def pytest_terminal_summary(terminalreporter):
"""Output a warning to IPython users in case any tests failed."""
try:
get_ipython()
except NameError:
return
if not terminalreporter.stats.get("failed"):
# Only issue the warning when there are actually failures
return
terminalreporter.ensure_newline()
terminalreporter.write_line(
"Some tests may fail when run from the IPython prompt; "
"especially, but not limited to tests involving logging and warning "
"handling. Unless you are certain as to the cause of the failure, "
"please check that the failure occurs outside IPython as well. See "
"https://docs.astropy.org/en/stable/known_issues.html#failing-logging-"
"tests-when-running-the-tests-in-ipython for more information.",
yellow=True,
bold=True,
)
|
09315792032cd9fdd651d00c2c9aad89e23c8ee73460c1bb83940d804a9191b8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module defines a logging class based on the built-in logging module.
.. note::
This module is meant for internal ``astropy`` usage. For use in other
packages, we recommend implementing your own logger instead.
"""
import inspect
import logging
import os
import sys
import warnings
from contextlib import contextmanager
from . import conf as _conf
from . import config as _config
from .utils import find_current_module
from .utils.exceptions import AstropyUserWarning, AstropyWarning
__all__ = ["Conf", "conf", "log", "AstropyLogger", "LoggingError"]
# import the logging levels from logging so that one can do:
# log.setLevel(log.DEBUG), for example
logging_levels = [
"NOTSET",
"DEBUG",
"INFO",
"WARNING",
"ERROR",
"CRITICAL",
"FATAL",
]
for level in logging_levels:
globals()[level] = getattr(logging, level)
__all__ += logging_levels
# Initialize by calling _init_log()
log = None
class LoggingError(Exception):
"""
This exception is for various errors that occur in the astropy logger,
typically when activating or deactivating logger-related features.
"""
class _AstLogIPYExc(Exception):
"""
An exception that is used only as a placeholder to indicate to the
IPython exception-catching mechanism that the astropy
exception-capturing is activated. It should not actually be used as
an exception anywhere.
"""
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.logger`.
"""
log_level = _config.ConfigItem(
"INFO",
"Threshold for the logging messages. Logging "
"messages that are less severe than this level "
"will be ignored. The levels are ``'DEBUG'``, "
"``'INFO'``, ``'WARNING'``, ``'ERROR'``.",
)
log_warnings = _config.ConfigItem(True, "Whether to log `warnings.warn` calls.")
log_exceptions = _config.ConfigItem(
False, "Whether to log exceptions before raising them."
)
log_to_file = _config.ConfigItem(
False, "Whether to always log messages to a log file."
)
log_file_path = _config.ConfigItem(
"",
"The file to log messages to. If empty string is given, "
"it defaults to a file ``'astropy.log'`` in "
"the astropy config directory.",
)
log_file_level = _config.ConfigItem(
"INFO", "Threshold for logging messages to `log_file_path`."
)
log_file_format = _config.ConfigItem(
"%(asctime)r, %(origin)r, %(levelname)r, %(message)r",
"Format for log file entries.",
)
log_file_encoding = _config.ConfigItem(
"",
"The encoding (e.g., UTF-8) to use for the log file. If empty string "
"is given, it defaults to the platform-preferred encoding.",
)
conf = Conf()
def _init_log():
"""Initializes the Astropy log--in most circumstances this is called
automatically when importing astropy.
"""
global log
orig_logger_cls = logging.getLoggerClass()
logging.setLoggerClass(AstropyLogger)
try:
log = logging.getLogger("astropy")
log._set_defaults()
finally:
logging.setLoggerClass(orig_logger_cls)
return log
def _teardown_log():
"""Shut down exception and warning logging (if enabled) and clear all
Astropy loggers from the logging module's cache.
    This involves poking some logging module internals, so much of it is 'at
    your own risk' and is allowed to pass silently if any exceptions occur.
"""
global log
if log.exception_logging_enabled():
log.disable_exception_logging()
if log.warnings_logging_enabled():
log.disable_warnings_logging()
del log
# Now for the fun stuff...
try:
logging._acquireLock()
try:
loggerDict = logging.Logger.manager.loggerDict
for key in loggerDict.keys():
if key == "astropy" or key.startswith("astropy."):
del loggerDict[key]
finally:
logging._releaseLock()
except Exception:
pass
Logger = logging.getLoggerClass()
class AstropyLogger(Logger):
"""
This class is used to set up the Astropy logging.
The main functionality added by this class over the built-in
logging.Logger class is the ability to keep track of the origin of the
messages, the ability to enable logging of warnings.warn calls and
exceptions, and the addition of colorized output and context managers to
easily capture messages to a file or list.
"""
def makeRecord(
self,
name,
level,
pathname,
lineno,
msg,
args,
exc_info,
func=None,
extra=None,
sinfo=None,
):
if extra is None:
extra = {}
if "origin" not in extra:
current_module = find_current_module(1, finddiff=[True, "logging"])
if current_module is not None:
extra["origin"] = current_module.__name__
else:
extra["origin"] = "unknown"
return Logger.makeRecord(
self,
name,
level,
pathname,
lineno,
msg,
args,
exc_info,
func=func,
extra=extra,
sinfo=sinfo,
)
_showwarning_orig = None
def _showwarning(self, *args, **kwargs):
# Bail out if we are not catching a warning from Astropy
if not isinstance(args[0], AstropyWarning):
return self._showwarning_orig(*args, **kwargs)
warning = args[0]
# Deliberately not using isinstance here: We want to display
# the class name only when it's not the default class,
# AstropyWarning. The name of subclasses of AstropyWarning should
# be displayed.
if type(warning) not in (AstropyWarning, AstropyUserWarning):
message = f"{warning.__class__.__name__}: {args[0]}"
else:
message = str(args[0])
mod_path = args[2]
# Now that we have the module's path, we look through sys.modules to
# find the module object and thus the fully-package-specified module
# name. The module.__file__ is the original source file name.
mod_name = None
mod_path, ext = os.path.splitext(mod_path)
for name, mod in list(sys.modules.items()):
try:
# Believe it or not this can fail in some cases:
# https://github.com/astropy/astropy/issues/2671
path = os.path.splitext(getattr(mod, "__file__", ""))[0]
except Exception:
continue
if path == mod_path:
mod_name = mod.__name__
break
if mod_name is not None:
self.warning(message, extra={"origin": mod_name})
else:
self.warning(message)
def warnings_logging_enabled(self):
return self._showwarning_orig is not None
def enable_warnings_logging(self):
"""
Enable logging of warnings.warn() calls.
Once called, any subsequent calls to ``warnings.warn()`` are
redirected to this logger and emitted with level ``WARN``. Note that
this replaces the output from ``warnings.warn``.
This can be disabled with ``disable_warnings_logging``.
"""
if self.warnings_logging_enabled():
raise LoggingError("Warnings logging has already been enabled")
self._showwarning_orig = warnings.showwarning
warnings.showwarning = self._showwarning
def disable_warnings_logging(self):
"""
Disable logging of warnings.warn() calls.
Once called, any subsequent calls to ``warnings.warn()`` are no longer
redirected to this logger.
This can be re-enabled with ``enable_warnings_logging``.
"""
if not self.warnings_logging_enabled():
raise LoggingError("Warnings logging has not been enabled")
if warnings.showwarning != self._showwarning:
raise LoggingError(
"Cannot disable warnings logging: "
"warnings.showwarning was not set by this "
"logger, or has been overridden"
)
warnings.showwarning = self._showwarning_orig
self._showwarning_orig = None
_excepthook_orig = None
def _excepthook(self, etype, value, traceback):
if traceback is None:
mod = None
else:
tb = traceback
while tb.tb_next is not None:
tb = tb.tb_next
mod = inspect.getmodule(tb)
        # Include the error type in the message.
if len(value.args) > 0:
message = f"{etype.__name__}: {str(value)}"
else:
message = str(etype.__name__)
if mod is not None:
self.error(message, extra={"origin": mod.__name__})
else:
self.error(message)
self._excepthook_orig(etype, value, traceback)
def exception_logging_enabled(self):
"""
Determine if the exception-logging mechanism is enabled.
Returns
-------
exclog : bool
True if exception logging is on, False if not.
"""
try:
ip = get_ipython()
except NameError:
ip = None
if ip is None:
return self._excepthook_orig is not None
else:
return _AstLogIPYExc in ip.custom_exceptions
def enable_exception_logging(self):
"""
Enable logging of exceptions.
Once called, any uncaught exceptions will be emitted with level
``ERROR`` by this logger, before being raised.
This can be disabled with ``disable_exception_logging``.
"""
try:
ip = get_ipython()
except NameError:
ip = None
if self.exception_logging_enabled():
raise LoggingError("Exception logging has already been enabled")
if ip is None:
# standard python interpreter
self._excepthook_orig = sys.excepthook
sys.excepthook = self._excepthook
else:
# IPython has its own way of dealing with excepthook
# We need to locally define the function here, because IPython
# actually makes this a member function of their own class
def ipy_exc_handler(ipyshell, etype, evalue, tb, tb_offset=None):
# First use our excepthook
self._excepthook(etype, evalue, tb)
# Now also do IPython's traceback
ipyshell.showtraceback((etype, evalue, tb), tb_offset=tb_offset)
# now register the function with IPython
# note that we include _AstLogIPYExc so `disable_exception_logging`
# knows that it's disabling the right thing
ip.set_custom_exc((BaseException, _AstLogIPYExc), ipy_exc_handler)
# and set self._excepthook_orig to a no-op
self._excepthook_orig = lambda etype, evalue, tb: None
def disable_exception_logging(self):
"""
Disable logging of exceptions.
Once called, any uncaught exceptions will no longer be emitted by this
logger.
This can be re-enabled with ``enable_exception_logging``.
"""
try:
ip = get_ipython()
except NameError:
ip = None
if not self.exception_logging_enabled():
raise LoggingError("Exception logging has not been enabled")
if ip is None:
# standard python interpreter
if sys.excepthook != self._excepthook:
raise LoggingError(
"Cannot disable exception logging: "
"sys.excepthook was not set by this logger, "
"or has been overridden"
)
sys.excepthook = self._excepthook_orig
self._excepthook_orig = None
else:
# IPython has its own way of dealing with exceptions
ip.set_custom_exc(tuple(), None)
def enable_color(self):
"""
Enable colorized output.
"""
_conf.use_color = True
def disable_color(self):
"""
Disable colorized output.
"""
_conf.use_color = False
@contextmanager
def log_to_file(self, filename, filter_level=None, filter_origin=None):
"""
Context manager to temporarily log messages to a file.
Parameters
----------
filename : str
The file to log messages to.
filter_level : str
If set, any log messages less important than ``filter_level`` will
not be output to the file. Note that this is in addition to the
top-level filtering for the logger, so if the logger has level
'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG``
will have no effect, since these messages are already filtered
out.
filter_origin : str
If set, only log messages with an origin starting with
``filter_origin`` will be output to the file.
Notes
-----
By default, the logger already outputs log messages to a file set in
the Astropy configuration file. Using this context manager does not
stop log messages from being output to that file, nor does it stop log
messages from being printed to standard output.
Examples
--------
The context manager is used as::
with logger.log_to_file('myfile.log'):
# your code here
"""
encoding = conf.log_file_encoding if conf.log_file_encoding else None
fh = logging.FileHandler(filename, encoding=encoding)
if filter_level is not None:
fh.setLevel(filter_level)
if filter_origin is not None:
fh.addFilter(FilterOrigin(filter_origin))
f = logging.Formatter(conf.log_file_format)
fh.setFormatter(f)
self.addHandler(fh)
yield
fh.close()
self.removeHandler(fh)
@contextmanager
def log_to_list(self, filter_level=None, filter_origin=None):
"""
Context manager to temporarily log messages to a list.
Parameters
----------
        filter_level : str
            If set, any log messages less important than ``filter_level`` will
            not be captured in the list. Note that this is in addition to the
            top-level filtering for the logger, so if the logger has level
            'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG``
            will have no effect, since these messages are already filtered
            out.
        filter_origin : str
            If set, only log messages with an origin starting with
            ``filter_origin`` will be captured in the list.
Notes
-----
Using this context manager does not stop log messages from being
output to standard output.
Examples
--------
The context manager is used as::
with logger.log_to_list() as log_list:
# your code here
"""
lh = ListHandler()
if filter_level is not None:
lh.setLevel(filter_level)
if filter_origin is not None:
lh.addFilter(FilterOrigin(filter_origin))
self.addHandler(lh)
yield lh.log_list
self.removeHandler(lh)
def _set_defaults(self):
"""
Reset logger to its initial state.
"""
# Reset any previously installed hooks
if self.warnings_logging_enabled():
self.disable_warnings_logging()
if self.exception_logging_enabled():
self.disable_exception_logging()
# Remove all previous handlers
for handler in self.handlers[:]:
self.removeHandler(handler)
# Set levels
self.setLevel(conf.log_level)
# Set up the stdout handler
sh = StreamHandler()
self.addHandler(sh)
# Set up the main log file handler if requested (but this might fail if
# configuration directory or log file is not writeable).
if conf.log_to_file:
log_file_path = conf.log_file_path
# "None" as a string because it comes from config
try:
_ASTROPY_TEST_
testing_mode = True
except NameError:
testing_mode = False
try:
if log_file_path == "" or testing_mode:
log_file_path = os.path.join(
_config.get_config_dir("astropy"), "astropy.log"
)
else:
log_file_path = os.path.expanduser(log_file_path)
encoding = conf.log_file_encoding if conf.log_file_encoding else None
fh = logging.FileHandler(log_file_path, encoding=encoding)
except OSError as e:
warnings.warn(
f"log file {log_file_path!r} could not be opened for writing:"
f" {str(e)}",
RuntimeWarning,
)
else:
formatter = logging.Formatter(conf.log_file_format)
fh.setFormatter(formatter)
fh.setLevel(conf.log_file_level)
self.addHandler(fh)
if conf.log_warnings:
self.enable_warnings_logging()
if conf.log_exceptions:
self.enable_exception_logging()
class StreamHandler(logging.StreamHandler):
"""
A specialized StreamHandler that logs INFO and DEBUG messages to
stdout, and all other messages to stderr. Also provides coloring
of the output, if enabled in the parent logger.
"""
def emit(self, record):
"""
        Emit ``record``, sending INFO/DEBUG to stdout and the rest to stderr.
"""
if record.levelno <= logging.INFO:
stream = sys.stdout
else:
stream = sys.stderr
if record.levelno < logging.DEBUG or not _conf.use_color:
print(record.levelname, end="", file=stream)
else:
# Import utils.console only if necessary and at the latest because
# the import takes a significant time [#4649]
from .utils.console import color_print
if record.levelno < logging.INFO:
color_print(record.levelname, "magenta", end="", file=stream)
elif record.levelno < logging.WARN:
color_print(record.levelname, "green", end="", file=stream)
elif record.levelno < logging.ERROR:
color_print(record.levelname, "brown", end="", file=stream)
else:
color_print(record.levelname, "red", end="", file=stream)
record.message = f"{record.msg} [{record.origin:s}]"
print(": " + record.message, file=stream)
class FilterOrigin:
"""A filter for the record origin."""
def __init__(self, origin):
self.origin = origin
def filter(self, record):
return record.origin.startswith(self.origin)
class ListHandler(logging.Handler):
"""A handler that can be used to capture the records in a list."""
def __init__(self, filter_level=None, filter_origin=None):
logging.Handler.__init__(self)
self.log_list = []
def emit(self, record):
self.log_list.append(record)
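# Illustrative sketch -- not part of astropy. It exercises the context managers
# defined on AstropyLogger above. ``tmp_path`` is a hypothetical
# ``pathlib.Path`` supplied by the caller, and "example.log" is a placeholder
# name; wrapped in a function so nothing runs at import time.
def _example_capture_astropy_log(tmp_path):
    from astropy import log
    # Collect records emitted while the block runs.
    with log.log_to_list() as records:
        log.info("computing something")
    assert any(r.getMessage() == "computing something" for r in records)
    # Mirror WARNING-and-above messages to a file as well.
    with log.log_to_file(tmp_path / "example.log", filter_level="WARNING"):
        log.warning("this line also goes to the file")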
|
3f93427a52347a9b5e680d257c289e769955277c65de329b4ef0ee4766e4a682 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory.
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# See sphinx_astropy.conf for which values are set there.
import configparser
import doctest
import os
import sys
from datetime import datetime
from importlib import metadata
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
# -- Check for missing dependencies -------------------------------------------
missing_requirements = {}
for line in metadata.requires("astropy"):
if 'extra == "docs"' in line:
req = Requirement(line.split(";")[0])
req_package = req.name.lower()
req_specifier = str(req.specifier)
        try:
            version = metadata.version(req_package)
        except metadata.PackageNotFoundError:
            missing_requirements[req_package] = req_specifier
        else:
            if version not in SpecifierSet(req_specifier, prereleases=True):
                missing_requirements[req_package] = req_specifier
if missing_requirements:
print(
"The following packages could not be found and are required to "
"build the documentation:"
)
for key, val in missing_requirements.items():
print(f" * {key} {val}")
print('Please install the "docs" requirements.')
sys.exit(1)
from sphinx_astropy.conf.v1 import * # noqa: E402
from sphinx_astropy.conf.v1 import ( # noqa: E402
exclude_patterns,
extensions,
intersphinx_mapping,
numpydoc_xref_aliases,
numpydoc_xref_astropy_aliases,
numpydoc_xref_ignore,
rst_epilog,
)
# -- Plot configuration -------------------------------------------------------
plot_rcparams = {
"axes.labelsize": "large",
"figure.figsize": (6, 6),
"figure.subplot.hspace": 0.5,
"savefig.bbox": "tight",
"savefig.facecolor": "none",
}
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ["png", "svg", "pdf"]
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.0"
# The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for
# the benefit of other packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping["astropy"]
# add any custom intersphinx for astropy
intersphinx_mapping.update(
{
"astropy-dev": ("https://docs.astropy.org/en/latest/", None),
"pyerfa": ("https://pyerfa.readthedocs.io/en/stable/", None),
"pytest": ("https://docs.pytest.org/en/stable/", None),
"ipython": ("https://ipython.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"sphinx_automodapi": (
"https://sphinx-automodapi.readthedocs.io/en/stable/",
None,
),
"packagetemplate": (
"https://docs.astropy.org/projects/package-template/en/latest/",
None,
),
"asdf-astropy": ("https://asdf-astropy.readthedocs.io/en/latest/", None),
"fsspec": ("https://filesystem-spec.readthedocs.io/en/latest/", None),
}
)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# .inc.rst files are *include* files; don't have Sphinx process them
exclude_patterns += ["_templates", "changes", "_pkgtemplate.rst", "**/*.inc.rst"]
# Add any paths that contain templates here, relative to this directory.
if "templates_path" not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append("_templates")
extensions += ["sphinx_changelog"]
# Grab minversion from setup.cfg
setup_cfg = configparser.ConfigParser()
setup_cfg.read(os.path.join(os.path.pardir, "setup.cfg"))
__minimum_python_version__ = setup_cfg["options"]["python_requires"].replace(">=", "")
min_versions = {}
for line in metadata.requires("astropy"):
req = Requirement(line.split(";")[0])
min_versions[req.name.lower()] = str(req.specifier)
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
with open("common_links.txt") as cl:
rst_epilog += cl.read().format(
minimum_python=__minimum_python_version__, **min_versions
)
# Manually register doctest options since matplotlib 3.5 messed up allowing them
# from pytest-doctestplus
IGNORE_OUTPUT = doctest.register_optionflag("IGNORE_OUTPUT")
REMOTE_DATA = doctest.register_optionflag("REMOTE_DATA")
FLOAT_CMP = doctest.register_optionflag("FLOAT_CMP")
# Whether to create cross-references for the parameter types in the
# Parameters, Other Parameters, Returns and Yields sections of the docstring.
numpydoc_xref_param_type = True
# Words not to cross-reference. Most likely, these are common words used in
# parameter type descriptions that may be confused for classes of the same
# name. The base set comes from sphinx-astropy. We add more here.
numpydoc_xref_ignore.update(
{
"mixin",
"Any", # aka something that would be annotated with `typing.Any`
# needed in subclassing numpy # TODO! revisit
"Arguments",
"Path",
# TODO! not need to ignore.
"flag",
"bits",
}
)
# Mappings to fully qualified paths (or correct ReST references) for the
# aliases/shortcuts used when specifying the types of parameters.
# Numpy provides some defaults
# https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94
# and a base set comes from sphinx-astropy.
# so here we mostly need to define Astropy-specific x-refs
numpydoc_xref_aliases.update(
{
# python & adjacent
"Any": "`~typing.Any`",
"file-like": ":term:`python:file-like object`",
"file": ":term:`python:file object`",
"path-like": ":term:`python:path-like object`",
"module": ":term:`python:module`",
"buffer-like": ":term:buffer-like",
"hashable": ":term:`python:hashable`",
# for matplotlib
"color": ":term:`color`",
# for numpy
"ints": ":class:`python:int`",
# for astropy
"number": ":term:`number`",
"Representation": ":class:`~astropy.coordinates.BaseRepresentation`",
"writable": ":term:`writable file-like object`",
"readable": ":term:`readable file-like object`",
"BaseHDU": ":doc:`HDU </io/fits/api/hdus>`",
}
)
# Add from sphinx-astropy 1) glossary aliases 2) physical types.
numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases)
# Turn off table of contents entries for functions and classes
toc_object_entries = False
# -- Project information ------------------------------------------------------
project = "Astropy"
author = "The Astropy Developers"
copyright = f"2011–{datetime.utcnow().year}, " + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The full version, including alpha/beta/rc tags.
release = metadata.version(project)
# The short X.Y version.
version = ".".join(release.split(".")[:2])
# Only include dev docs in dev version.
dev = "dev" in release
if not dev:
exclude_patterns += ["development/*", "testhelpers.rst"]
# -- Options for the module index ---------------------------------------------
modindex_common_prefix = ["astropy."]
# -- Options for HTML output ---------------------------------------------------
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f"{project} v{release}"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {"to_be_indexed": ["stable", "latest"], "is_development": dev}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ["robots.txt"]
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", project + ".tex", project + " Documentation", author, "manual")
]
latex_logo = "_static/astropy_logo.pdf"
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", project.lower(), project + " Documentation", [author], 1)]
# Setting this URL is required by sphinx-astropy
github_issues_url = "https://github.com/astropy/astropy/issues/"
edit_on_github_branch = "main"
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
# See docs/nitpick-exceptions file for the actual listing.
nitpick_ignore = []
for line in open("nitpick-exceptions"):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
nitpick_ignore.append((dtype, target.strip()))
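# Illustrative sketch (hypothetical entries, not from the real file): the loop
# above expects ``nitpick-exceptions`` to contain one "<reference-type> <target>"
# pair per line, with blank lines and "#" comments skipped, e.g.::
#
#     # cross-references that cannot resolve
#     py:class numpy.ma.core.MaskedArray
#     py:obj   distutils.version.LooseVersion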
# -- Options for the Sphinx gallery -------------------------------------------
try:
import warnings
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
"backreferences_dir": "generated/modules", # path to store the module using example template
"filename_pattern": "^((?!skip_).)*$", # execute all examples except those that start with "skip_"
"examples_dirs": f"..{os.sep}examples", # path to the examples scripts
"gallery_dirs": "generated/examples", # path to save gallery generated examples
"reference_url": {
"astropy": None,
"matplotlib": "https://matplotlib.org/stable/",
"numpy": "https://numpy.org/doc/stable/",
},
"abort_on_example_error": True,
}
# Filter out backend-related warnings as described in
# https://github.com/sphinx-gallery/sphinx-gallery/pull/564
warnings.filterwarnings(
"ignore",
category=UserWarning,
message=(
"Matplotlib is currently using agg, which is a"
" non-GUI backend, so cannot show the figure."
),
)
except ImportError:
sphinx_gallery = None
# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = [
"https://journals.aas.org/manuscript-preparation/",
"https://maia.usno.navy.mil/",
"https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer",
"https://aa.usno.navy.mil/publications/docs/Circular_179.php",
"http://data.astropy.org",
"https://doi.org/10.1017/S0251107X00002406", # internal server error
"https://doi.org/10.1017/pasa.2013.31", # internal server error
"https://www.tandfonline.com/", # 403 Client Error: Forbidden
"https://pyfits.readthedocs.io/en/v3.2.1/", # defunct page in CHANGES.rst
r"https://github\.com/astropy/astropy/(?:issues|pull)/\d+",
]
linkcheck_timeout = 180
linkcheck_anchors = False
def rstjinja(app, docname, source):
"""Render pages as a jinja template to hide/show dev docs."""
# Make sure we're outputting HTML
if app.builder.format != "html":
return
files_to_render = ["index", "install"]
if docname in files_to_render:
print(f"Jinja rendering {docname}")
rendered = app.builder.templates.render_string(
source[0], app.config.html_context
)
source[0] = rendered
def resolve_astropy_and_dev_reference(app, env, node, contnode):
"""
Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases.
Documentation links in astropy can be set up as intersphinx links so that
affiliate packages do not have to override the docstrings when building
the docs.
If we are building the development docs it is a local ref targeting the
label ``astropy-dev:<label>``, but for stable docs it should be an
intersphinx resolution to the development docs.
See https://github.com/astropy/astropy/issues/11366
"""
# should the node be processed?
reftarget = node.get("reftarget") # str or None
if str(reftarget).startswith("astropy:"):
# This allows Astropy to use intersphinx links to itself and have
# them resolve to local links. Downstream packages will see intersphinx.
# TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented.
process, replace = True, "astropy:"
elif dev and str(reftarget).startswith("astropy-dev:"):
process, replace = True, "astropy-dev:"
else:
process, replace = False, ""
# make link local
if process:
reftype = node.get("reftype")
refdoc = node.get("refdoc", app.env.docname)
# convert astropy intersphinx targets to local links.
# there are a few types of intersphinx link patterns, as described in
# https://docs.readthedocs.io/en/stable/guides/intersphinx.html
reftarget = reftarget.replace(replace, "")
if reftype == "doc": # also need to replace the doc link
node.replace_attr("reftarget", reftarget)
# Delegate to the ref node's original domain/target (typically :ref:)
try:
domain = app.env.domains[node["refdomain"]]
return domain.resolve_xref(
app.env, refdoc, app.builder, reftype, reftarget, node, contnode
)
except Exception:
pass
# Otherwise return None which should delegate to intersphinx
def setup(app):
if sphinx_gallery is None:
msg = (
"The sphinx_gallery extension is not installed, so the "
"gallery will not be built. You will probably see "
"additional warnings about undefined references due "
"to this."
)
try:
app.warn(msg)
except AttributeError:
# Sphinx 1.6+
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.warning(msg)
# Generate the page from Jinja template
app.connect("source-read", rstjinja)
# Set this to higher priority than intersphinx; this way when building
# dev docs astropy-dev: targets will go to the local docs instead of the
# intersphinx mapping
app.connect("missing-reference", resolve_astropy_and_dev_reference, priority=400)
|
b9769a0ad61a5919a8dee879dffc830f8101bb790c9823f3c9c6fef04823f001 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file needs to be included here to make sure commands such
# as ``pytest docs/...`` works, since this
# will ignore the conftest.py file at the root of the repository
# and the one in astropy/conftest.py
import os
import tempfile
import pytest
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.
os.environ["XDG_CONFIG_HOME"] = tempfile.mkdtemp("astropy_config")
os.environ["XDG_CACHE_HOME"] = tempfile.mkdtemp("astropy_cache")
os.mkdir(os.path.join(os.environ["XDG_CONFIG_HOME"], "astropy"))
os.mkdir(os.path.join(os.environ["XDG_CACHE_HOME"], "astropy"))
# Note that we don't need to change the environment variables back or remove
# them after testing, because they are only changed for the duration of the
# Python process, and this configuration only matters if running pytest
# directly, not from e.g. an IPython session.
@pytest.fixture(autouse=True)
def _docdir(request):
"""Run doctests in isolated tmp_path so outputs do not end up in repo."""
# Trigger ONLY for doctestplus
doctest_plugin = request.config.pluginmanager.getplugin("doctestplus")
if isinstance(request.node.parent, doctest_plugin._doctest_textfile_item_cls):
# Don't apply this fixture to io.rst. It reads files and doesn't write.
# Implementation from https://github.com/pytest-dev/pytest/discussions/10437
if "io.rst" not in request.node.name:
old_cwd = os.getcwd()
tmp_path = request.getfixturevalue("tmp_path")
os.chdir(tmp_path)
yield
os.chdir(old_cwd)
else:
yield
else:
yield
|
82abdf5ca34f67e943209d86526f28e0a365498e0ab830869ca227ec44414ab3 | r"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy:astropy-coordinates-design` and
the docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example,
we will define a coordinate system defined by the plane of orbit of the
Sagittarius Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003).
The Sgr coordinate system is often referred to in terms of two angular
coordinates, :math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page https://www.stsci.edu/~dlaw/Sgr/
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
"""
##############################################################################
# Set up matplotlib and numpy, and use a nicer set of plot parameters:
import matplotlib.pyplot as plt
import numpy as np
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
import astropy.coordinates as coord
import astropy.units as u
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
https://ui.adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
https://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `~astropy.coordinates.Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `~astropy.coordinates.Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : `~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : `~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta``
        must also be given).
radial_velocity : `~astropy.units.Quantity`, optional, keyword-only
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')]
}
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
SGR_MATRIX = (
np.diag([1.,1.,-1.])
@ rotation_matrix(SGR_PSI, "z")
@ rotation_matrix(SGR_THETA, "x")
@ rotation_matrix(SGR_PHI, "z")
)
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
"""Compute the Galactic spherical to heliocentric Sgr transformation matrix."""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
"""Compute the heliocentric Sgr to spherical Galactic transformation matrix."""
return matrix_transpose(SGR_MATRIX)
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.SkyCoord(280.161732*u.degree, 11.91934*u.degree, frame='icrs')
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian, frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr,
frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(
fr"$\mu_\Lambda \, \cos B$ [{sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')}]")
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(
fr"$\mu_\alpha \, \cos\delta$ [{icrs.pm_ra_cosdec.unit.to_string('latex_inline')}]")
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(
fr"$\mu_\delta$ [{icrs.pm_dec.unit.to_string('latex_inline')}]")
plt.show()
|
2a38c2f8c885c2c41d94c234c4585eec3d7fe6786fe58788b40179a9b3c95f1e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from numpy.core.multiarray import normalize_axis_index
from astropy.stats._fast_sigma_clip import _sigma_clip_fast
from astropy.stats.funcs import mad_std
from astropy.units import Quantity
from astropy.utils import isiterable
from astropy.utils.compat.optional_deps import HAS_BOTTLENECK
from astropy.utils.exceptions import AstropyUserWarning
if HAS_BOTTLENECK:
import bottleneck
__all__ = ["SigmaClip", "sigma_clip", "sigma_clipped_stats"]
def _move_tuple_axes_first(array, axis):
"""
Bottleneck can only take integer axis, not tuple, so this function
takes all the axes to be operated on and combines them into the
first dimension of the array so that we can then use axis=0.
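    As a small illustration of the reshaping (shapes only)::
        >>> import numpy as np
        >>> a = np.zeros((4, 5, 6))
        >>> _move_tuple_axes_first(a, axis=(0, 2)).shape
        (24, 5)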
"""
# Figure out how many axes we are operating over
naxis = len(axis)
# Add remaining axes to the axis tuple
axis += tuple(i for i in range(array.ndim) if i not in axis)
# The new position of each axis is just in order
destination = tuple(range(array.ndim))
# Reorder the array so that the axes being operated on are at the
# beginning
array_new = np.moveaxis(array, axis, destination)
# Collapse the dimensions being operated on into a single dimension
# so that we can then use axis=0 with the bottleneck functions
array_new = array_new.reshape((-1,) + array_new.shape[naxis:])
return array_new
def _nanmean(array, axis=None):
"""Bottleneck nanmean function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanmean(array, axis=axis))
else:
return bottleneck.nanmean(array, axis=axis)
def _nanmedian(array, axis=None):
"""Bottleneck nanmedian function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanmedian(array, axis=axis))
else:
return bottleneck.nanmedian(array, axis=axis)
def _nanstd(array, axis=None, ddof=0):
"""Bottleneck nanstd function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanstd(array, axis=axis, ddof=ddof))
else:
return bottleneck.nanstd(array, axis=axis, ddof=ddof)
def _nanmadstd(array, axis=None):
"""mad_std function that ignores NaNs by default."""
return mad_std(array, axis=axis, ignore_nan=True)
class SigmaClip:
"""
Class to perform sigma clipping.
The data will be iterated over, each time rejecting values that are
less or more than a specified number of standard deviations from a
center value.
Clipped (rejected) pixels are those where::
data < center - (sigma_lower * std)
data > center + (sigma_upper * std)
where::
center = cenfunc(data [, axis=])
std = stdfunc(data [, axis=])
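    For example (illustrative numbers only), with ``center = 10``, ``std = 2``,
    ``sigma_lower = 3``, and ``sigma_upper = 2``, values below 4 or above 14
    are rejected.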
Invalid data values (i.e., NaN or inf) are automatically clipped.
For a functional interface to sigma clipping, see
:func:`sigma_clip`.
.. note::
`scipy.stats.sigmaclip` provides a subset of the functionality
in this class. Also, its input data cannot be a masked array
and it does not handle data that contains invalid values (i.e.,
NaN or inf). Also note that it uses the mean as the centering
function. The equivalent settings to `scipy.stats.sigmaclip`
are::
sigclip = SigmaClip(sigma=4., cenfunc='mean', maxiters=None)
sigclip(data, axis=None, masked=False, return_bounds=True)
Parameters
----------
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
        neighbours and so on (see the last example below).
See Also
--------
sigma_clip, sigma_clipped_stats
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
    specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=2, maxiters=5)
>>> filtered_data = sigclip(randvar)
This example clips all points that are more than 3 sigma relative
to the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and modifies the data in-place::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=3, maxiters=None, cenfunc='mean')
>>> filtered_data = sigclip(randvar, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> sigclip = SigmaClip(sigma=2.3)
>>> filtered_data = sigclip(data, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
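    This example (a sketch; it requires `scipy` for the dilation) additionally
    masks the pixels neighbouring each clipped value within a radius of 1.5
    pixels along the clipped axis::
        >>> sigclip = SigmaClip(sigma=2.3, grow=1.5)  # doctest: +SKIP
        >>> filtered_data = sigclip(data, axis=0)  # doctest: +SKIP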
"""
def __init__(
self,
sigma=3.0,
sigma_lower=None,
sigma_upper=None,
maxiters=5,
cenfunc="median",
stdfunc="std",
grow=False,
):
self.sigma = sigma
self.sigma_lower = sigma_lower or sigma
self.sigma_upper = sigma_upper or sigma
self.maxiters = maxiters or np.inf
self.cenfunc = cenfunc
self.stdfunc = stdfunc
self._cenfunc_parsed = self._parse_cenfunc(cenfunc)
self._stdfunc_parsed = self._parse_stdfunc(stdfunc)
self._min_value = np.nan
self._max_value = np.nan
self._niterations = 0
self.grow = grow
# This just checks that SciPy is available, to avoid failing
# later than necessary if __call__ needs it:
if self.grow:
from scipy.ndimage import binary_dilation
self._binary_dilation = binary_dilation
def __repr__(self):
return (
f"SigmaClip(sigma={self.sigma}, sigma_lower={self.sigma_lower},"
f" sigma_upper={self.sigma_upper}, maxiters={self.maxiters},"
f" cenfunc={self.cenfunc!r}, stdfunc={self.stdfunc!r}, grow={self.grow})"
)
def __str__(self):
lines = ["<" + self.__class__.__name__ + ">"]
attrs = [
"sigma",
"sigma_lower",
"sigma_upper",
"maxiters",
"cenfunc",
"stdfunc",
"grow",
]
for attr in attrs:
lines.append(f" {attr}: {repr(getattr(self, attr))}")
return "\n".join(lines)
@staticmethod
def _parse_cenfunc(cenfunc):
if isinstance(cenfunc, str):
if cenfunc == "median":
if HAS_BOTTLENECK:
cenfunc = _nanmedian
else:
cenfunc = np.nanmedian # pragma: no cover
elif cenfunc == "mean":
if HAS_BOTTLENECK:
cenfunc = _nanmean
else:
cenfunc = np.nanmean # pragma: no cover
else:
raise ValueError(f"{cenfunc} is an invalid cenfunc.")
return cenfunc
@staticmethod
def _parse_stdfunc(stdfunc):
if isinstance(stdfunc, str):
if stdfunc == "std":
if HAS_BOTTLENECK:
stdfunc = _nanstd
else:
stdfunc = np.nanstd # pragma: no cover
elif stdfunc == "mad_std":
stdfunc = _nanmadstd
else:
raise ValueError(f"{stdfunc} is an invalid stdfunc.")
return stdfunc
def _compute_bounds(self, data, axis=None):
# ignore RuntimeWarning if the array (or along an axis) has only
# NaNs
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
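            # Note: ``_max_value`` temporarily holds the center value here;
            # it is shifted up to the upper bound once the std is known.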
self._max_value = self._cenfunc_parsed(data, axis=axis)
std = self._stdfunc_parsed(data, axis=axis)
self._min_value = self._max_value - (std * self.sigma_lower)
self._max_value += std * self.sigma_upper
def _sigmaclip_fast(
self, data, axis=None, masked=True, return_bounds=False, copy=True
):
"""
Fast C implementation for simple use cases.
"""
if isinstance(data, Quantity):
data, unit = data.value, data.unit
else:
unit = None
if copy is False and masked is False and data.dtype.kind != "f":
raise Exception(
"cannot mask non-floating-point array with NaN "
"values, set copy=True or masked=True to avoid "
"this."
)
if axis is None:
axis = -1 if data.ndim == 1 else tuple(range(data.ndim))
if not isiterable(axis):
axis = normalize_axis_index(axis, data.ndim)
data_reshaped = data
transposed_shape = None
else:
# The gufunc implementation does not handle non-scalar axis
# so we combine the dimensions together as the last
# dimension and set axis=-1
axis = tuple(normalize_axis_index(ax, data.ndim) for ax in axis)
transposed_axes = (
tuple(ax for ax in range(data.ndim) if ax not in axis) + axis
)
data_transposed = data.transpose(transposed_axes)
transposed_shape = data_transposed.shape
data_reshaped = data_transposed.reshape(
transposed_shape[: data.ndim - len(axis)] + (-1,)
)
axis = -1
if data_reshaped.dtype.kind != "f" or data_reshaped.dtype.itemsize > 8:
data_reshaped = data_reshaped.astype(float)
mask = ~np.isfinite(data_reshaped)
if np.any(mask):
warnings.warn(
"Input data contains invalid values (NaNs or "
"infs), which were automatically clipped.",
AstropyUserWarning,
)
if isinstance(data_reshaped, np.ma.MaskedArray):
mask |= data_reshaped.mask
data = data.view(np.ndarray)
data_reshaped = data_reshaped.view(np.ndarray)
mask = np.broadcast_to(mask, data_reshaped.shape).copy()
bound_lo, bound_hi = _sigma_clip_fast(
data_reshaped,
mask,
self.cenfunc == "median",
self.stdfunc == "mad_std",
-1 if np.isinf(self.maxiters) else self.maxiters,
self.sigma_lower,
self.sigma_upper,
axis=axis,
)
with np.errstate(invalid="ignore"):
mask |= data_reshaped < np.expand_dims(bound_lo, axis)
mask |= data_reshaped > np.expand_dims(bound_hi, axis)
if transposed_shape is not None:
# Get mask in shape of data.
mask = mask.reshape(transposed_shape)
mask = mask.transpose(
tuple(transposed_axes.index(ax) for ax in range(data.ndim))
)
if masked:
result = np.ma.array(data, mask=mask, copy=copy)
else:
if copy:
result = data.astype(float, copy=True)
else:
result = data
result[mask] = np.nan
if unit is not None:
result = result << unit
bound_lo = bound_lo << unit
bound_hi = bound_hi << unit
if return_bounds:
return result, bound_lo, bound_hi
else:
return result
def _sigmaclip_noaxis(self, data, masked=True, return_bounds=False, copy=True):
"""
Sigma clip when ``axis`` is None and ``grow`` is not >0.
In this simple case, we remove clipped elements from the
flattened array during each iteration.
"""
filtered_data = data.ravel()
# remove masked values and convert to ndarray
if isinstance(filtered_data, np.ma.MaskedArray):
filtered_data = filtered_data.data[~filtered_data.mask]
# remove invalid values
good_mask = np.isfinite(filtered_data)
if np.any(~good_mask):
filtered_data = filtered_data[good_mask]
warnings.warn(
"Input data contains invalid values (NaNs or "
"infs), which were automatically clipped.",
AstropyUserWarning,
)
nchanged = 1
iteration = 0
while nchanged != 0 and (iteration < self.maxiters):
iteration += 1
size = filtered_data.size
self._compute_bounds(filtered_data, axis=None)
filtered_data = filtered_data[
(filtered_data >= self._min_value) & (filtered_data <= self._max_value)
]
nchanged = size - filtered_data.size
self._niterations = iteration
if masked:
# return a masked array and optional bounds
filtered_data = np.ma.masked_invalid(data, copy=copy)
# update the mask in place, ignoring RuntimeWarnings for
# comparisons with NaN data values
with np.errstate(invalid="ignore"):
filtered_data.mask |= np.logical_or(
data < self._min_value, data > self._max_value
)
if return_bounds:
return filtered_data, self._min_value, self._max_value
else:
return filtered_data
def _sigmaclip_withaxis(
self, data, axis=None, masked=True, return_bounds=False, copy=True
):
"""
Sigma clip the data when ``axis`` or ``grow`` is specified.
In this case, we replace clipped values with NaNs as placeholder
values.
"""
# float array type is needed to insert nans into the array
filtered_data = data.astype(float) # also makes a copy
# remove invalid values
bad_mask = ~np.isfinite(filtered_data)
if np.any(bad_mask):
filtered_data[bad_mask] = np.nan
warnings.warn(
"Input data contains invalid values (NaNs or "
"infs), which were automatically clipped.",
AstropyUserWarning,
)
# remove masked values and convert to plain ndarray
if isinstance(filtered_data, np.ma.MaskedArray):
filtered_data = np.ma.masked_invalid(filtered_data).astype(float)
filtered_data = filtered_data.filled(np.nan)
if axis is not None:
# convert negative axis/axes
if not isiterable(axis):
axis = (axis,)
axis = tuple(filtered_data.ndim + n if n < 0 else n for n in axis)
# define the shape of min/max arrays so that they can be broadcast
# with the data
mshape = tuple(
1 if dim in axis else size
for dim, size in enumerate(filtered_data.shape)
)
if self.grow:
# Construct a growth kernel from the specified radius in
# pixels (consider caching this for re-use by subsequent
# calls?):
cenidx = int(self.grow)
size = 2 * cenidx + 1
indices = np.mgrid[(slice(0, size),) * data.ndim]
if axis is not None:
for n, dim in enumerate(indices):
# For any axes that we're not clipping over, set
# their indices outside the growth radius, so masked
# points won't "grow" in that dimension:
if n not in axis:
dim[dim != cenidx] = size
kernel = sum((idx - cenidx) ** 2 for idx in indices) <= self.grow**2
del indices
nchanged = 1
iteration = 0
while nchanged != 0 and (iteration < self.maxiters):
iteration += 1
self._compute_bounds(filtered_data, axis=axis)
if not np.isscalar(self._min_value):
self._min_value = self._min_value.reshape(mshape)
self._max_value = self._max_value.reshape(mshape)
with np.errstate(invalid="ignore"):
# Since these comparisons are always False for NaNs, the
# resulting mask contains only newly-rejected pixels and
# we can dilate it without growing masked pixels more
# than once.
new_mask = (filtered_data < self._min_value) | (
filtered_data > self._max_value
)
if self.grow:
new_mask = self._binary_dilation(new_mask, kernel)
filtered_data[new_mask] = np.nan
nchanged = np.count_nonzero(new_mask)
del new_mask
self._niterations = iteration
if masked:
# create an output masked array
if copy:
filtered_data = np.ma.MaskedArray(
data, ~np.isfinite(filtered_data), copy=True
)
else:
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
out = np.ma.masked_invalid(data, copy=False)
filtered_data = np.ma.masked_where(
np.logical_or(out < self._min_value, out > self._max_value),
out,
copy=False,
)
if return_bounds:
return filtered_data, self._min_value, self._max_value
else:
return filtered_data
def __call__(self, data, axis=None, masked=True, return_bounds=False, copy=True):
"""
Perform sigma clipping on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If
`None`, then the flattened data will be used. ``axis`` is
passed to the ``cenfunc`` and ``stdfunc``. The default is
`None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where
the mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` is returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are
also returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If
`False` and ``masked=True``, then the returned masked array
data will contain the same array as the input ``data`` (if
``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`).
If `False` and ``masked=False``, the input data is modified
in-place. The default is `True`.
Returns
-------
result : array-like
If ``masked=True``, then a `~numpy.ma.MaskedArray` is
returned, where the mask is `True` for clipped values and
where the input mask was `True`.
If ``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the masked
array or array above, the minimum and maximum clipping
bounds are returned.
If ``masked=False`` and ``axis=None``, then the output
array is a flattened 1D `~numpy.ndarray` where the clipped
values have been removed. If ``return_bounds=True`` then the
returned minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the
output `~numpy.ndarray` will have the same shape as the
input ``data`` and contain ``np.nan`` where values were
clipped. If the input ``data`` was a masked array, then the
output `~numpy.ndarray` will also contain ``np.nan`` where
the input mask was `True`. If ``return_bounds=True`` then
the returned minimum and maximum clipping thresholds will be
            `~numpy.ndarray`\\s.
"""
data = np.asanyarray(data)
if data.size == 0:
if masked:
result = np.ma.MaskedArray(data)
else:
result = data
if return_bounds:
return result, self._min_value, self._max_value
else:
return result
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
if masked:
result = data
else:
result = np.full(data.shape, np.nan)
if return_bounds:
return result, self._min_value, self._max_value
else:
return result
# Shortcut for common cases where a fast C implementation can be
# used.
if (
self.cenfunc in ("mean", "median")
and self.stdfunc in ("std", "mad_std")
and axis is not None
and not self.grow
):
return self._sigmaclip_fast(
data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy
)
# These two cases are treated separately because when
# ``axis=None`` we can simply remove clipped values from the
# array. This is not possible when ``axis`` or ``grow`` is
# specified.
if axis is None and not self.grow:
return self._sigmaclip_noaxis(
data, masked=masked, return_bounds=return_bounds, copy=copy
)
else:
return self._sigmaclip_withaxis(
data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy
)
def sigma_clip(
data,
sigma=3,
sigma_lower=None,
sigma_upper=None,
maxiters=5,
cenfunc="median",
stdfunc="std",
axis=None,
masked=True,
return_bounds=False,
copy=True,
grow=False,
):
"""
Perform sigma-clipping on the provided data.
The data will be iterated over, each time rejecting values that are
less or more than a specified number of standard deviations from a
center value.
Clipped (rejected) pixels are those where::
data < center - (sigma_lower * std)
data > center + (sigma_upper * std)
where::
center = cenfunc(data [, axis=])
std = stdfunc(data [, axis=])
Invalid data values (i.e., NaN or inf) are automatically clipped.
For an object-oriented interface to sigma clipping, see
:class:`SigmaClip`.
.. note::
`scipy.stats.sigmaclip` provides a subset of the functionality
in this class. Also, its input data cannot be a masked array
and it does not handle data that contains invalid values (i.e.,
NaN or inf). Also note that it uses the mean as the centering
function. The equivalent settings to `scipy.stats.sigmaclip`
are::
sigma_clip(sigma=4., cenfunc='mean', maxiters=None, axis=None,
... masked=False, return_bounds=True)
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where
        the mask is `True` for clipped values. If `False`, then a
        `~numpy.ndarray` is returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are also
returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If `False`
and ``masked=True``, then the returned masked array data will
contain the same array as the input ``data`` (if ``data`` is a
`~numpy.ndarray` or `~numpy.ma.MaskedArray`). If `False` and
``masked=False``, the input data is modified in-place. The
default is `True`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Returns
-------
result : array-like
If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned,
where the mask is `True` for clipped values and where the input
mask was `True`.
If ``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the masked array
or array above, the minimum and maximum clipping bounds are
returned.
If ``masked=False`` and ``axis=None``, then the output array
is a flattened 1D `~numpy.ndarray` where the clipped values
have been removed. If ``return_bounds=True`` then the returned
minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the output
`~numpy.ndarray` will have the same shape as the input ``data``
and contain ``np.nan`` where values were clipped. If the input
``data`` was a masked array, then the output `~numpy.ndarray`
will also contain ``np.nan`` where the input mask was `True`.
If ``return_bounds=True`` then the returned minimum and maximum
    clipping thresholds will be `~numpy.ndarray`\\s.
See Also
--------
SigmaClip, sigma_clipped_stats
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
    specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5)
This example clips all points that are more than 3 sigma relative
to the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and does not copy the data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
... cenfunc=mean, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
sigclip = SigmaClip(
sigma=sigma,
sigma_lower=sigma_lower,
sigma_upper=sigma_upper,
maxiters=maxiters,
cenfunc=cenfunc,
stdfunc=stdfunc,
grow=grow,
)
return sigclip(
data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy
)
def sigma_clipped_stats(
data,
mask=None,
mask_value=None,
sigma=3.0,
sigma_lower=None,
sigma_upper=None,
maxiters=5,
cenfunc="median",
stdfunc="std",
std_ddof=0,
axis=None,
grow=False,
):
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
    specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
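    Examples
    --------
    This illustrative example computes the statistics of a constant array
    contaminated by a few large outliers; the outliers are rejected and the
    statistics of the remaining zeros are returned::
        >>> import numpy as np
        >>> from astropy.stats import sigma_clipped_stats
        >>> data = np.zeros(100)
        >>> data[95:] = 100.0
        >>> mean, median, std = sigma_clipped_stats(data, sigma=3.0, maxiters=5)
        >>> float(mean), float(median), float(std)
        (0.0, 0.0, 0.0)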
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
return np.ma.masked, np.ma.masked, np.ma.masked
sigclip = SigmaClip(
sigma=sigma,
sigma_lower=sigma_lower,
sigma_upper=sigma_upper,
maxiters=maxiters,
cenfunc=cenfunc,
stdfunc=stdfunc,
grow=grow,
)
data_clipped = sigclip(
data, axis=axis, masked=False, return_bounds=False, copy=True
)
if HAS_BOTTLENECK:
mean = _nanmean(data_clipped, axis=axis)
median = _nanmedian(data_clipped, axis=axis)
std = _nanstd(data_clipped, ddof=std_ddof, axis=axis)
else: # pragma: no cover
mean = np.nanmean(data_clipped, axis=axis)
median = np.nanmedian(data_clipped, axis=axis)
std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis)
return mean, median, std
|
f2d12d78580a0541af11775188f05d1712298b17399755dc5920df9d55c506e0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Bayesian Blocks for Time Series Analysis.
Bayesian Blocks for Time Series Analysis
========================================
Dynamic programming algorithm for solving a piecewise-constant model for
various datasets. This is based on the algorithm presented in Scargle
et al 2013 [1]_. This code was ported from the astroML project [2]_.
Applications include:
- finding an optimal histogram with adaptive bin widths
- finding optimal segmentation of time series data
- detecting inflection points in the rate of event data
The primary interface to these routines is the :func:`bayesian_blocks`
function. This module provides fitness functions suitable for three types
of data:
- Irregularly-spaced event data via the :class:`Events` class
- Regularly-spaced event data via the :class:`RegularEvents` class
- Irregularly-spaced point measurements via the :class:`PointMeasures` class
For more fine-tuned control over the fitness functions used, it is possible
to define custom :class:`FitnessFunc` classes directly and use them with
the :func:`bayesian_blocks` routine.
One common application of the Bayesian Blocks algorithm is the determination
of optimal adaptive-width histogram bins. This uses the same fitness function
as for irregularly-spaced time series events. The easiest interface for
creating Bayesian Blocks histograms is the :func:`astropy.stats.histogram`
function.
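A minimal usage sketch (assuming ``t`` is a one-dimensional array of event
times)::
    from astropy.stats import bayesian_blocks
    edges = bayesian_blocks(t, fitness='events', p0=0.01)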
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S
.. [2] https://www.astroml.org/ https://github.com/astroML/astroML/
.. [3] Bellman, R.E., Dreyfus, S.E., 1962. Applied Dynamic
Programming. Princeton University Press, Princeton.
https://press.princeton.edu/books/hardcover/9780691651873/applied-dynamic-programming
.. [4] Bellman, R., Roth, R., 1969. Curve fitting by segmented
straight lines. J. Amer. Statist. Assoc. 64, 1079–1084.
https://www.tandfonline.com/doi/abs/10.1080/01621459.1969.10501038
"""
import warnings
from inspect import signature
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
# TODO: implement other fitness functions from appendix C of Scargle 2013
__all__ = ["FitnessFunc", "Events", "RegularEvents", "PointMeasures", "bayesian_blocks"]
def bayesian_blocks(t, x=None, sigma=None, fitness="events", **kwargs):
r"""Compute optimal segmentation of data with Scargle's Bayesian Blocks.
This is a flexible implementation of the Bayesian Blocks algorithm
described in Scargle 2013 [1]_.
Parameters
----------
t : array-like
data times (one dimensional, length N)
x : array-like, optional
data values
sigma : array-like or float, optional
data errors
fitness : str or object
the fitness function to use for the model.
If a string, the following options are supported:
- 'events' : binned or unbinned event data. Arguments are ``gamma``,
which gives the slope of the prior on the number of bins, or
``ncp_prior``, which is :math:`-\ln({\tt gamma})`.
- 'regular_events' : non-overlapping events measured at multiples of a
fundamental tick rate, ``dt``, which must be specified as an
additional argument. Extra arguments are ``p0``, which gives the
false alarm probability to compute the prior, or ``gamma``, which
gives the slope of the prior on the number of bins, or ``ncp_prior``,
which is :math:`-\ln({\tt gamma})`.
- 'measures' : fitness for a measured sequence with Gaussian errors.
Extra arguments are ``p0``, which gives the false alarm probability
to compute the prior, or ``gamma``, which gives the slope of the
prior on the number of bins, or ``ncp_prior``, which is
:math:`-\ln({\tt gamma})`.
In all three cases, if more than one of ``p0``, ``gamma``, and
``ncp_prior`` is chosen, ``ncp_prior`` takes precedence over ``gamma``
which takes precedence over ``p0``.
Alternatively, the fitness parameter can be an instance of
:class:`FitnessFunc` or a subclass thereof.
**kwargs :
any additional keyword arguments will be passed to the specified
:class:`FitnessFunc` derived class.
Returns
-------
edges : ndarray
array containing the (N+1) edges defining the N bins
Examples
--------
.. testsetup::
>>> np.random.seed(12345)
Event data:
>>> t = np.random.normal(size=100)
>>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
Event data with repeats:
>>> t = np.random.normal(size=100)
>>> t[80:] = t[:20]
>>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
Regular event data:
>>> dt = 0.05
>>> t = dt * np.arange(1000)
>>> x = np.zeros(len(t))
>>> x[np.random.randint(0, len(t), len(t) // 10)] = 1
>>> edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
Measured point data with errors:
>>> t = 100 * np.random.random(100)
>>> x = np.exp(-0.5 * (t - 50) ** 2)
>>> sigma = 0.1
>>> x_obs = np.random.normal(x, sigma)
>>> edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
References
----------
.. [1] Scargle, J et al. (2013)
https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S
.. [2] Bellman, R.E., Dreyfus, S.E., 1962. Applied Dynamic
Programming. Princeton University Press, Princeton.
https://press.princeton.edu/books/hardcover/9780691651873/applied-dynamic-programming
.. [3] Bellman, R., Roth, R., 1969. Curve fitting by segmented
straight lines. J. Amer. Statist. Assoc. 64, 1079–1084.
https://www.tandfonline.com/doi/abs/10.1080/01621459.1969.10501038
See Also
--------
astropy.stats.histogram : compute a histogram using bayesian blocks
"""
FITNESS_DICT = {
"events": Events,
"regular_events": RegularEvents,
"measures": PointMeasures,
}
fitness = FITNESS_DICT.get(fitness, fitness)
if type(fitness) is type and issubclass(fitness, FitnessFunc):
fitfunc = fitness(**kwargs)
elif isinstance(fitness, FitnessFunc):
fitfunc = fitness
else:
raise ValueError("fitness parameter not understood")
return fitfunc.fit(t, x, sigma)
class FitnessFunc:
"""Base class for bayesian blocks fitness functions.
Derived classes should overload the following method:
``fitness(self, **kwargs)``:
Compute the fitness given a set of named arguments.
Arguments accepted by fitness must be among ``[T_k, N_k, a_k, b_k, c_k]``
(See [1]_ for details on the meaning of these parameters).
Additionally, other methods may be overloaded as well:
``__init__(self, **kwargs)``:
Initialize the fitness function with any parameters beyond the normal
``p0`` and ``gamma``.
``validate_input(self, t, x, sigma)``:
Enable specific checks of the input data (``t``, ``x``, ``sigma``)
to be performed prior to the fit.
``compute_ncp_prior(self, N)``: If ``ncp_prior`` is not defined explicitly,
this function is called in order to define it before fitting. This may be
calculated from ``gamma``, ``p0``, or whatever method you choose.
``p0_prior(self, N)``:
Specify the form of the prior given the false-alarm probability ``p0``
(See [1]_ for details).
For examples of implemented fitness functions, see :class:`Events`,
:class:`RegularEvents`, and :class:`PointMeasures`.
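    A minimal sketch of a custom fitness function (the class name is
    hypothetical; ``t`` is assumed to hold event times)::
        class MyEvents(FitnessFunc):
            def fitness(self, N_k, T_k):
                # same log-likelihood form as the built-in Events class
                return N_k * np.log(N_k / T_k)
        edges = bayesian_blocks(t, fitness=MyEvents(p0=0.01))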
References
----------
.. [1] Scargle, J et al. (2013)
https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S
"""
def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
self.p0 = p0
self.gamma = gamma
self.ncp_prior = ncp_prior
def validate_input(self, t, x=None, sigma=None):
"""Validate inputs to the model.
Parameters
----------
t : array-like
times of observations
x : array-like, optional
values observed at each time
sigma : float or array-like, optional
errors in values x
Returns
-------
t, x, sigma : array-like, float or None
validated and perhaps modified versions of inputs
"""
# validate array input
t = np.asarray(t, dtype=float)
# find unique values of t
t = np.array(t)
if t.ndim != 1:
raise ValueError("t must be a one-dimensional array")
unq_t, unq_ind, unq_inv = np.unique(t, return_index=True, return_inverse=True)
# if x is not specified, x will be counts at each time
if x is None:
if sigma is not None:
raise ValueError("If sigma is specified, x must be specified")
else:
sigma = 1
if len(unq_t) == len(t):
x = np.ones_like(t)
else:
x = np.bincount(unq_inv)
t = unq_t
# if x is specified, then we need to simultaneously sort t and x
else:
# TODO: allow broadcasted x?
x = np.asarray(x, dtype=float)
if x.shape not in [(), (1,), (t.size,)]:
raise ValueError("x does not match shape of t")
x += np.zeros_like(t)
if len(unq_t) != len(t):
raise ValueError(
"Repeated values in t not supported when x is specified"
)
t = unq_t
x = x[unq_ind]
# verify the given sigma value
if sigma is None:
sigma = 1
else:
sigma = np.asarray(sigma, dtype=float)
if sigma.shape not in [(), (1,), (t.size,)]:
raise ValueError("sigma does not match the shape of x")
return t, x, sigma
def fitness(self, **kwargs):
raise NotImplementedError()
def p0_prior(self, N):
"""Empirical prior, parametrized by the false alarm probability ``p0``.
See eq. 21 in Scargle (2013).
Note that there was an error in this equation in the original Scargle
paper (the "log" was missing). The following corrected form is taken
from https://arxiv.org/abs/1304.2818
"""
return 4 - np.log(73.53 * self.p0 * (N**-0.478))
# the fitness_args property will return the list of arguments accepted by
# the method fitness(). This allows more efficient computation below.
@property
def _fitness_args(self):
return signature(self.fitness).parameters.keys()
def compute_ncp_prior(self, N):
"""
If ``ncp_prior`` is not explicitly defined, compute it from ``gamma``
or ``p0``.
"""
if self.gamma is not None:
return -np.log(self.gamma)
elif self.p0 is not None:
return self.p0_prior(N)
else:
raise ValueError(
"``ncp_prior`` cannot be computed as neither "
"``gamma`` nor ``p0`` is defined."
)
def fit(self, t, x=None, sigma=None):
"""Fit the Bayesian Blocks model given the specified fitness function.
Parameters
----------
t : array-like
data times (one dimensional, length N)
x : array-like, optional
data values
sigma : array-like or float, optional
data errors
Returns
-------
edges : ndarray
array containing the (M+1) edges defining the M optimal bins
"""
t, x, sigma = self.validate_input(t, x, sigma)
# compute values needed for computation, below
if "a_k" in self._fitness_args:
ak_raw = np.ones_like(x) / sigma**2
if "b_k" in self._fitness_args:
bk_raw = x / sigma**2
if "c_k" in self._fitness_args:
ck_raw = x * x / sigma**2
# create length-(N + 1) array of cell edges
edges = np.concatenate([t[:1], 0.5 * (t[1:] + t[:-1]), t[-1:]])
block_length = t[-1] - edges
# arrays to store the best configuration
N = len(t)
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
# Compute ncp_prior if not defined
if self.ncp_prior is None:
ncp_prior = self.compute_ncp_prior(N)
else:
ncp_prior = self.ncp_prior
# ----------------------------------------------------------------
# Start with first data cell; add one cell at each iteration
# ----------------------------------------------------------------
for R in range(N):
# Compute fit_vec : fitness of putative last block (end at R)
kwds = {}
# T_k: width/duration of each block
if "T_k" in self._fitness_args:
kwds["T_k"] = block_length[: (R + 1)] - block_length[R + 1]
# N_k: number of elements in each block
if "N_k" in self._fitness_args:
kwds["N_k"] = np.cumsum(x[: (R + 1)][::-1])[::-1]
# a_k: eq. 31
if "a_k" in self._fitness_args:
kwds["a_k"] = 0.5 * np.cumsum(ak_raw[: (R + 1)][::-1])[::-1]
# b_k: eq. 32
if "b_k" in self._fitness_args:
kwds["b_k"] = -np.cumsum(bk_raw[: (R + 1)][::-1])[::-1]
# c_k: eq. 33
if "c_k" in self._fitness_args:
kwds["c_k"] = 0.5 * np.cumsum(ck_raw[: (R + 1)][::-1])[::-1]
# evaluate fitness function
fit_vec = self.fitness(**kwds)
A_R = fit_vec - ncp_prior
A_R[1:] += best[:R]
i_max = np.argmax(A_R)
last[R] = i_max
best[R] = A_R[i_max]
# ----------------------------------------------------------------
# Now find changepoints by iteratively peeling off the last block
# ----------------------------------------------------------------
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while i_cp > 0:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
if i_cp == 0:
change_points[i_cp] = 0
change_points = change_points[i_cp:]
return edges[change_points]
class Events(FitnessFunc):
r"""Bayesian blocks fitness for binned or unbinned events.
Parameters
----------
p0 : float, optional
False alarm probability, used to compute the prior on
:math:`N_{\rm blocks}` (see eq. 21 of Scargle 2013). For the Events
type data, ``p0`` does not seem to be an accurate representation of the
actual false alarm probability. If you are using this fitness function
for a triggering type condition, it is recommended that you run
statistical trials on signal-free noise to determine an appropriate
value of ``gamma`` or ``ncp_prior`` to use for a desired false alarm
rate.
gamma : float, optional
If specified, then use this gamma to compute the general prior form,
:math:`p \sim {\tt gamma}^{N_{\rm blocks}}`. If gamma is specified, p0
is ignored.
ncp_prior : float, optional
If specified, use the value of ``ncp_prior`` to compute the prior as
above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt
gamma})`.
        If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are ignored.
"""
def fitness(self, N_k, T_k):
# eq. 19 from Scargle 2013
return N_k * (np.log(N_k / T_k))
def validate_input(self, t, x, sigma):
t, x, sigma = super().validate_input(t, x, sigma)
if x is not None and np.any(x % 1 > 0):
raise ValueError("x must be integer counts for fitness='events'")
return t, x, sigma
class RegularEvents(FitnessFunc):
r"""Bayesian blocks fitness for regular events.
This is for data which has a fundamental "tick" length, so that all
measured values are multiples of this tick length. In each tick, there
are either zero or one counts.
Parameters
----------
dt : float
        tick length for the data
p0 : float, optional
False alarm probability, used to compute the prior on :math:`N_{\rm
blocks}` (see eq. 21 of Scargle 2013). If gamma is specified, p0 is
ignored.
ncp_prior : float, optional
If specified, use the value of ``ncp_prior`` to compute the prior as
above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt
gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are
ignored.
"""
def __init__(self, dt, p0=0.05, gamma=None, ncp_prior=None):
self.dt = dt
super().__init__(p0, gamma, ncp_prior)
def validate_input(self, t, x, sigma):
t, x, sigma = super().validate_input(t, x, sigma)
if not np.all((x == 0) | (x == 1)):
raise ValueError("Regular events must have only 0 and 1 in x")
return t, x, sigma
def fitness(self, T_k, N_k):
# Eq. C23 of Scargle 2013
M_k = T_k / self.dt
N_over_M = N_k / M_k
eps = 1e-8
if np.any(N_over_M > 1 + eps):
warnings.warn(
"regular events: N/M > 1. Is the time step correct?",
AstropyUserWarning,
)
one_m_NM = 1 - N_over_M
N_over_M[N_over_M <= 0] = 1
one_m_NM[one_m_NM <= 0] = 1
return N_k * np.log(N_over_M) + (M_k - N_k) * np.log(one_m_NM)
class PointMeasures(FitnessFunc):
r"""Bayesian blocks fitness for point measures.
Parameters
----------
p0 : float, optional
False alarm probability, used to compute the prior on :math:`N_{\rm
blocks}` (see eq. 21 of Scargle 2013). If gamma is specified, p0 is
ignored.
ncp_prior : float, optional
If specified, use the value of ``ncp_prior`` to compute the prior as
above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt
gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are
ignored.
"""
def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
super().__init__(p0, gamma, ncp_prior)
def fitness(self, a_k, b_k):
# eq. 41 from Scargle 2013
return (b_k * b_k) / (4 * a_k)
def validate_input(self, t, x, sigma):
if x is None:
raise ValueError("x must be specified for point measures")
return super().validate_input(t, x, sigma)
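if __name__ == "__main__":
    # Illustrative sketch (editorial addition, not part of the astropy API):
    # recover a rate change point with the ``Events`` fitness function. This
    # assumes ``FitnessFunc.validate_input`` (defined earlier in this module)
    # supplies default ``x`` and ``sigma`` when only event times are given.
    rng = np.random.default_rng(42)
    t_demo = np.sort(
        np.concatenate([rng.uniform(0, 50, 100), rng.uniform(50, 100, 400)])
    )
    print("block edges:", Events(p0=0.01).fit(t_demo))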
|
9f312e750b394860e595a5be75f7005212977ee44a166f95188410a08fe287e4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for dealing with circular statistics, for
instance, mean, variance, standard deviation, correlation coefficient, and so
on. This module also covers tests of uniformity, e.g., the Rayleigh and V tests.
The Maximum Likelihood Estimator for the Von Mises distribution along with the
Cramer-Rao Lower Bounds are also implemented. Almost all of the implementations
are based on reference [1]_, which is also the basis for the R package
'CircStats' [2]_.
"""
import numpy as np
from astropy.units import Quantity
__all__ = [
"circmean",
"circstd",
"circvar",
"circmoment",
"circcorrcoef",
"rayleightest",
"vtest",
"vonmisesmle",
]
__doctest_requires__ = {"vtest": ["scipy"]}
def _components(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized rectangular components
# of the circular data.
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError("Weights and data have inconsistent shape.")
C = np.sum(weights * np.cos(p * (data - phi)), axis) / np.sum(weights, axis)
S = np.sum(weights * np.sin(p * (data - phi)), axis) / np.sum(weights, axis)
return C, S
def _angle(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized sample mean angle
C, S = _components(data, p, phi, axis, weights)
# theta will be an angle in the interval [-np.pi, np.pi)
# [-180, 180)*u.deg in case data is a Quantity
theta = np.arctan2(S, C)
if isinstance(data, Quantity):
theta = theta.to(data.unit)
return theta
def _length(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized sample length
C, S = _components(data, p, phi, axis, weights)
return np.hypot(S, C)
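def _components_demo():
    # Illustrative sketch (editorial addition, not part of the public API):
    # the helpers above return the sample mean angle and resultant length R;
    # ``circmean`` and ``circvar`` below are thin wrappers around them.
    sample = np.radians([10.0, 20.0, 30.0])
    return _angle(sample), _length(sample)  # approx (0.349 rad, 0.99)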
def circmean(data, axis=None, weights=None):
"""Computes the circular mean angle of an array of circular data.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular means are computed. The default is to compute
the mean of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22, for
detailed explanation.
Returns
-------
circmean : ndarray or `~astropy.units.Quantity`
Circular mean.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmean
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmean(data) # doctest: +FLOAT_CMP
<Quantity 48.62718088722989 deg>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
return _angle(data, 1, 0.0, axis, weights)
def circvar(data, axis=None, weights=None):
"""Computes the circular variance of an array of circular data.
There are some concepts for defining measures of dispersion for circular
data. The variance implemented here is based on the definition given by
[1]_, which is also the same used by the R package 'CircStats' [2]_.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
Dimensionless, if Quantity.
axis : int, optional
Axis along which circular variances are computed. The default is to
compute the variance of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circvar : ndarray or `~astropy.units.Quantity` ['dimensionless']
Circular variance.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circvar
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circvar(data) # doctest: +FLOAT_CMP
<Quantity 0.16356352748437508>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
Notes
-----
The definition used here differs from the one in scipy.stats.circvar.
Precisely, Scipy circvar uses an approximation based on the limit of small
angles which approaches the linear variance.
"""
return 1.0 - _length(data, 1, 0.0, axis, weights)
def circstd(data, axis=None, weights=None, method="angular"):
"""Computes the circular standard deviation of an array of circular data.
The standard deviation implemented here is based on the definitions given
    by [1]_, which is also the same as that used by the R package 'CircStats' [2]_.
Two methods are implemented: 'angular' and 'circular'. The former is
    defined as sqrt(2 * (1 - R)) and it is bounded in [0, sqrt(2)]. The
latter is defined as sqrt(-2 * ln(R)) and it is bounded in [0, inf].
Following 'CircStat' the default method used to obtain the standard
deviation is 'angular'.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
If quantity, must be dimensionless.
axis : int, optional
        Axis along which circular standard deviations are computed. The default
        is to compute the standard deviation of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [3]_, remark 1.4, page 22,
for detailed explanation.
method : str, optional
The method used to estimate the standard deviation:
- 'angular' : obtains the angular deviation
- 'circular' : obtains the circular deviation
Returns
-------
circstd : ndarray or `~astropy.units.Quantity` ['dimensionless']
Angular or circular standard deviation.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circstd
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circstd(data) # doctest: +FLOAT_CMP
<Quantity 0.57195022>
Alternatively, using the 'circular' method:
>>> import numpy as np
>>> from astropy.stats import circstd
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circstd(data, method='circular') # doctest: +FLOAT_CMP
<Quantity 0.59766999>
References
----------
.. [1] P. Berens. "CircStat: A MATLAB Toolbox for Circular Statistics".
Journal of Statistical Software, vol 31, issue 10, 2009.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
.. [3] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
"""
if method not in ("angular", "circular"):
raise ValueError("method should be either 'angular' or 'circular'")
if method == "angular":
return np.sqrt(2.0 * (1.0 - _length(data, 1, 0.0, axis, weights)))
else:
return np.sqrt(-2.0 * np.log(_length(data, 1, 0.0, axis, weights)))
def circmoment(data, p=1.0, centered=False, axis=None, weights=None):
"""Computes the ``p``-th trigonometric circular moment for an array
of circular data.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
p : float, optional
Order of the circular moment.
centered : bool, optional
If ``True``, central circular moments are computed. Default value is
``False``.
axis : int, optional
Axis along which circular moments are computed. The default is to
compute the circular moment of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circmoment : ndarray or `~astropy.units.Quantity`
The first and second elements correspond to the direction and length of
the ``p``-th circular moment, respectively.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmoment
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmoment(data, p=2) # doctest: +FLOAT_CMP
(<Quantity 90.99263082432564 deg>, <Quantity 0.48004283892950717>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if centered:
phi = circmean(data, axis, weights)
else:
phi = 0.0
return _angle(data, p, phi, axis, weights), _length(data, p, phi, axis, weights)
def circcorrcoef(alpha, beta, axis=None, weights_alpha=None, weights_beta=None):
"""Computes the circular correlation coefficient between two array of
circular data.
Parameters
----------
alpha : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
beta : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular correlation coefficients are computed.
        The default is to compute the circular correlation coefficient of the
flattened array.
weights_alpha : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights_alpha``
represents a weighting factor for each group such that
``sum(weights_alpha, axis)`` equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
weights_beta : numpy.ndarray, optional
See description of ``weights_alpha``.
Returns
-------
rho : ndarray or `~astropy.units.Quantity` ['dimensionless']
Circular correlation coefficient.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circcorrcoef
>>> from astropy import units as u
>>> alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302,
... 324, 85, 324, 340, 157, 238, 254, 146, 232, 122,
... 329])*u.deg
>>> beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94,
... 45, 47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
>>> circcorrcoef(alpha, beta) # doctest: +FLOAT_CMP
<Quantity 0.2704648826748831>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if np.size(alpha, axis) != np.size(beta, axis):
raise ValueError("alpha and beta must be arrays of the same size")
mu_a = circmean(alpha, axis, weights_alpha)
mu_b = circmean(beta, axis, weights_beta)
sin_a = np.sin(alpha - mu_a)
sin_b = np.sin(beta - mu_b)
rho = np.sum(sin_a * sin_b) / np.sqrt(np.sum(sin_a * sin_a) * np.sum(sin_b * sin_b))
return rho
def rayleightest(data, axis=None, weights=None):
"""Performs the Rayleigh test of uniformity.
This test is used to identify a non-uniform distribution, i.e. it is
    designed for detecting a unimodal deviation from uniformity. More
precisely, it assumes the following hypotheses:
- H0 (null hypothesis): The population is distributed uniformly around the
circle.
- H1 (alternative hypothesis): The population is not distributed uniformly
around the circle.
Small p-values suggest to reject the null hypothesis.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the Rayleigh test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``np.sum(weights, axis)``
equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
Returns
-------
p-value : float or `~astropy.units.Quantity` ['dimensionless']
Examples
--------
>>> import numpy as np
>>> from astropy.stats import rayleightest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> rayleightest(data) # doctest: +FLOAT_CMP
<Quantity 0.2563487733797317>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    .. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
.. [4] D. Wilkie. "Rayleigh Test for Randomness of Circular Data". Applied
Statistics. 1983.
<http://wexler.free.fr/library/files/wilkie%20(1983)%20rayleigh%20test%20for%20randomness%20of%20circular%20data.pdf>
"""
n = np.size(data, axis=axis)
Rbar = _length(data, 1, 0.0, axis, weights)
z = n * Rbar * Rbar
# see [3] and [4] for the formulae below
tmp = 1.0
if n < 50:
tmp = (
1.0
+ (2.0 * z - z * z) / (4.0 * n)
- (24.0 * z - 132.0 * z**2.0 + 76.0 * z**3.0 - 9.0 * z**4.0)
/ (288.0 * n * n)
)
p_value = np.exp(-z) * tmp
return p_value
def vtest(data, mu=0.0, axis=None, weights=None):
"""Performs the Rayleigh test of uniformity where the alternative
hypothesis H1 is assumed to have a known mean angle ``mu``.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
mu : float or `~astropy.units.Quantity` ['angle'], optional
Mean angle. Assumed to be known.
axis : int, optional
Axis along which the V test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
p-value : float or `~astropy.units.Quantity` ['dimensionless']
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vtest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vtest(data) # doctest: +FLOAT_CMP
<Quantity 0.6223678199713766>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    .. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
"""
from scipy.stats import norm
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError("Weights and data have inconsistent shape.")
n = np.size(data, axis=axis)
R0bar = np.sum(weights * np.cos(data - mu), axis) / np.sum(weights, axis)
z = np.sqrt(2.0 * n) * R0bar
pz = norm.cdf(z)
fz = norm.pdf(z)
# see reference [3]
p_value = (
1
- pz
+ fz
* (
(3 * z - z**3) / (16.0 * n)
+ (15 * z + 305 * z**3 - 125 * z**5 + 9 * z**7) / (4608.0 * n * n)
)
)
return p_value
def _A1inv(x):
# Approximation for _A1inv(x) according R Package 'CircStats'
# See http://www.scienceasia.org/2012.38.n1/scias38_118.pdf, equation (4)
if 0 <= x < 0.53:
return 2.0 * x + x * x * x + (5.0 * x**5) / 6.0
elif x < 0.85:
return -0.4 + 1.39 * x + 0.43 / (1.0 - x)
else:
return 1.0 / (x * x * x - 4.0 * x * x + 3.0 * x)
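def _A1inv_demo():
    # Illustrative sketch (editorial addition, not part of the public API):
    # ``_A1inv`` inverts A1(kappa) = I1(kappa)/I0(kappa), so round-tripping
    # through the Bessel-function ratio should recover kappa. Requires scipy.
    from scipy.special import iv
    kappa = 2.0
    return _A1inv(iv(1, kappa) / iv(0, kappa))  # approx 2.0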
def vonmisesmle(data, axis=None):
"""Computes the Maximum Likelihood Estimator (MLE) for the parameters of
the von Mises distribution.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the mle will be computed.
Returns
-------
mu : float or `~astropy.units.Quantity`
The mean (aka location parameter).
kappa : float or `~astropy.units.Quantity` ['dimensionless']
The concentration parameter.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vonmisesmle
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vonmisesmle(data) # doctest: +FLOAT_CMP
(<Quantity 101.16894320013179 deg>, <Quantity 1.49358958737054>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
    mu = circmean(data, axis=axis)
    # expand mu so that it broadcasts against data when an axis is given
    mu_b = mu if axis is None else np.expand_dims(mu, axis)
    kappa = _A1inv(np.mean(np.cos(data - mu_b), axis))
    return mu, kappa
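if __name__ == "__main__":
    # Illustrative sketch (editorial addition): the main public functions on a
    # plain ndarray, which is interpreted as angles in radians.
    sample = np.radians([51, 67, 40, 109, 31, 358])
    print("circular mean            :", circmean(sample))
    print("circular variance        :", circvar(sample))
    print("angular std. deviation   :", circstd(sample))
    print("Rayleigh test p-value    :", rayleightest(sample))
    print("von Mises MLE (mu, kappa):", vonmisesmle(sample))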
|
0b98e5fd06be74e8e2985581099dba4263eb908521840f1095831432c8168e92 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements functions and classes for spatial statistics.
"""
import math
import numpy as np
__all__ = ["RipleysKEstimator"]
class RipleysKEstimator:
"""
Estimators for Ripley's K function for two-dimensional spatial data.
See [1]_, [2]_, [3]_, [4]_, [5]_ for detailed mathematical and
practical aspects of those estimators.
Parameters
----------
area : float
        Area of study from which the points were observed.
x_max, y_max : float, float, optional
Maximum rectangular coordinates of the area of study.
        Required if ``mode == 'translation'`` or ``mode == 'ohser'``.
x_min, y_min : float, float, optional
Minimum rectangular coordinates of the area of study.
        Required if ``mode == 'var-width'`` or ``mode == 'ohser'``.
Examples
--------
>>> import numpy as np
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> from astropy.stats import RipleysKEstimator
>>> z = np.random.uniform(low=5, high=10, size=(100, 2))
>>> Kest = RipleysKEstimator(area=25, x_max=10, y_max=10,
... x_min=5, y_min=5)
>>> r = np.linspace(0, 2.5, 100)
>>> plt.plot(r, Kest.poisson(r)) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='none')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='translation')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='ohser')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='var-width')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='ripley')) # doctest: +SKIP
References
----------
.. [1] Peebles, P.J.E. *The large scale structure of the universe*.
<https://ui.adsabs.harvard.edu/abs/1980lssu.book.....P>
.. [2] Spatial descriptive statistics.
<https://en.wikipedia.org/wiki/Spatial_descriptive_statistics>
.. [3] Package spatstat.
<https://cran.r-project.org/web/packages/spatstat/spatstat.pdf>
.. [4] Cressie, N.A.C. (1991). Statistics for Spatial Data,
Wiley, New York.
.. [5] Stoyan, D., Stoyan, H. (1992). Fractals, Random Shapes and
Point Fields, Akademie Verlag GmbH, Chichester.
"""
def __init__(self, area, x_max=None, y_max=None, x_min=None, y_min=None):
self.area = area
self.x_max = x_max
self.y_max = y_max
self.x_min = x_min
self.y_min = y_min
@property
def area(self):
return self._area
@area.setter
def area(self, value):
if isinstance(value, (float, int)) and value > 0:
self._area = value
else:
raise ValueError(f"area is expected to be a positive number. Got {value}.")
@property
def y_max(self):
return self._y_max
@y_max.setter
def y_max(self, value):
if value is None or isinstance(value, (float, int)):
self._y_max = value
else:
raise ValueError(
f"y_max is expected to be a real number or None. Got {value}."
)
@property
def x_max(self):
return self._x_max
@x_max.setter
def x_max(self, value):
if value is None or isinstance(value, (float, int)):
self._x_max = value
else:
raise ValueError(
f"x_max is expected to be a real number or None. Got {value}."
)
@property
def y_min(self):
return self._y_min
@y_min.setter
def y_min(self, value):
if value is None or isinstance(value, (float, int)):
self._y_min = value
else:
raise ValueError(f"y_min is expected to be a real number. Got {value}.")
@property
def x_min(self):
return self._x_min
@x_min.setter
def x_min(self, value):
if value is None or isinstance(value, (float, int)):
self._x_min = value
else:
raise ValueError(f"x_min is expected to be a real number. Got {value}.")
def __call__(self, data, radii, mode="none"):
return self.evaluate(data=data, radii=radii, mode=mode)
def _pairwise_diffs(self, data):
npts = len(data)
diff = np.zeros(shape=(npts * (npts - 1) // 2, 2), dtype=np.double)
k = 0
for i in range(npts - 1):
size = npts - i - 1
diff[k : k + size] = abs(data[i] - data[i + 1 :])
k += size
return diff
def poisson(self, radii):
"""
Evaluates the Ripley K function for the homogeneous Poisson process,
        also known as Complete Spatial Randomness (CSR).
Parameters
----------
radii : 1D array
Set of distances in which Ripley's K function will be evaluated.
Returns
-------
output : 1D array
Ripley's K function evaluated at ``radii``.
"""
return np.pi * radii * radii
def Lfunction(self, data, radii, mode="none"):
"""
Evaluates the L function at ``radii``. For parameter description
see ``evaluate`` method.
"""
return np.sqrt(self.evaluate(data, radii, mode=mode) / np.pi)
def Hfunction(self, data, radii, mode="none"):
"""
Evaluates the H function at ``radii``. For parameter description
see ``evaluate`` method.
"""
return self.Lfunction(data, radii, mode=mode) - radii
def evaluate(self, data, radii, mode="none"):
"""
Evaluates the Ripley K estimator for a given set of values ``radii``.
Parameters
----------
data : 2D array
            Set of observed points as an n by 2 array which will be used to
estimate Ripley's K function.
radii : 1D array
Set of distances in which Ripley's K estimator will be evaluated.
            It is common to consider max(radii) < (area/2)**0.5.
mode : str
Keyword which indicates the method for edge effects correction.
Available methods are 'none', 'translation', 'ohser', 'var-width',
and 'ripley'.
* 'none'
this method does not take into account any edge effects
whatsoever.
* 'translation'
computes the intersection of rectangular areas centered at
the given points provided the upper bounds of the
dimensions of the rectangular area of study. It assumes that
all the points lie in a bounded rectangular region satisfying
x_min < x_i < x_max; y_min < y_i < y_max. A detailed
description of this method can be found on ref [4].
* 'ohser'
this method uses the isotropized set covariance function of
the window of study as a weight to correct for
edge-effects. A detailed description of this method can be
found on ref [4].
* 'var-width'
this method considers the distance of each observed point to
the nearest boundary of the study window as a factor to
account for edge-effects. See [3] for a brief description of
this method.
* 'ripley'
this method is known as Ripley's edge-corrected estimator.
The weight for edge-correction is a function of the
proportions of circumferences centered at each data point
which crosses another data point of interest. See [3] for
a detailed description of this method.
Returns
-------
ripley : 1D array
Ripley's K function estimator evaluated at ``radii``.
"""
data = np.asarray(data)
if not data.shape[1] == 2:
raise ValueError(
"data must be an n by 2 array, where n is the "
"number of observed points."
)
npts = len(data)
ripley = np.zeros(len(radii))
if mode == "none":
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
for r in range(len(radii)):
ripley[r] = (distances < radii[r]).sum()
ripley = self.area * 2.0 * ripley / (npts * (npts - 1))
# eq. 15.11 Stoyan book page 283
elif mode == "translation":
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
intersec_area = ((self.x_max - self.x_min) - diff[:, 0]) * (
(self.y_max - self.y_min) - diff[:, 1]
)
for r in range(len(radii)):
dist_indicator = distances < radii[r]
ripley[r] = ((1 / intersec_area) * dist_indicator).sum()
ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
# Stoyan book page 123 and eq 15.13
elif mode == "ohser":
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
a = self.area
b = max(
(self.y_max - self.y_min) / (self.x_max - self.x_min),
(self.x_max - self.x_min) / (self.y_max - self.y_min),
)
x = distances / math.sqrt(a / b)
u = np.sqrt((x * x - 1) * (x > 1))
v = np.sqrt((x * x - b**2) * (x < math.sqrt(b**2 + 1)) * (x > b))
c1 = np.pi - 2 * x * (1 + 1 / b) + x * x / b
c2 = 2 * np.arcsin((1 / x) * (x > 1)) - 1 / b - 2 * (x - u)
c3 = (
2
* np.arcsin(
((b - u * v) / (x * x)) * (x > b) * (x < math.sqrt(b**2 + 1))
)
+ 2 * u
+ 2 * v / b
- b
- (1 + x * x) / b
)
cov_func = (a / np.pi) * (
c1 * (x >= 0) * (x <= 1)
+ c2 * (x > 1) * (x <= b)
+ c3 * (b < x) * (x < math.sqrt(b**2 + 1))
)
for r in range(len(radii)):
dist_indicator = distances < radii[r]
ripley[r] = ((1 / cov_func) * dist_indicator).sum()
ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
# Cressie book eq 8.2.20 page 616
elif mode == "var-width":
lt_dist = np.minimum(
np.minimum(self.x_max - data[:, 0], self.y_max - data[:, 1]),
np.minimum(data[:, 0] - self.x_min, data[:, 1] - self.y_min),
)
for r in range(len(radii)):
for i in range(npts):
for j in range(npts):
if i != j:
diff = abs(data[i] - data[j])
dist = math.sqrt((diff * diff).sum())
if dist < radii[r] < lt_dist[i]:
ripley[r] = ripley[r] + 1
lt_dist_sum = (lt_dist > radii[r]).sum()
if not lt_dist_sum == 0:
ripley[r] = ripley[r] / lt_dist_sum
ripley = self.area * ripley / npts
# Cressie book eq 8.4.22 page 640
elif mode == "ripley":
hor_dist = np.zeros(shape=(npts * (npts - 1)) // 2, dtype=np.double)
ver_dist = np.zeros(shape=(npts * (npts - 1)) // 2, dtype=np.double)
for k in range(npts - 1):
min_hor_dist = min(self.x_max - data[k][0], data[k][0] - self.x_min)
min_ver_dist = min(self.y_max - data[k][1], data[k][1] - self.y_min)
start = (k * (2 * (npts - 1) - (k - 1))) // 2
end = ((k + 1) * (2 * (npts - 1) - k)) // 2
hor_dist[start:end] = min_hor_dist * np.ones(npts - 1 - k)
ver_dist[start:end] = min_ver_dist * np.ones(npts - 1 - k)
diff = self._pairwise_diffs(data)
dist = np.hypot(diff[:, 0], diff[:, 1])
dist_ind = dist <= np.hypot(hor_dist, ver_dist)
w1 = (
1
- (
np.arccos(np.minimum(ver_dist, dist) / dist)
+ np.arccos(np.minimum(hor_dist, dist) / dist)
)
/ np.pi
)
w2 = (
3 / 4
- 0.5
* (
np.arccos(ver_dist / dist * ~dist_ind)
+ np.arccos(hor_dist / dist * ~dist_ind)
)
/ np.pi
)
weight = dist_ind * w1 + ~dist_ind * w2
for r in range(len(radii)):
ripley[r] = ((dist < radii[r]) / weight).sum()
ripley = self.area * 2.0 * ripley / (npts * (npts - 1))
else:
raise ValueError(f"mode {mode} is not implemented.")
return ripley
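if __name__ == "__main__":
    # Illustrative sketch (editorial addition): compare two edge-corrected
    # estimates with the Poisson (CSR) expectation for a uniform point
    # pattern, as in the class docstring but without plotting.
    rng = np.random.default_rng(0)
    z = rng.uniform(low=5, high=10, size=(100, 2))
    kest = RipleysKEstimator(area=25, x_max=10, y_max=10, x_min=5, y_min=5)
    r = np.linspace(0.5, 2.5, 5)
    print("poisson (CSR):", kest.poisson(r))
    print("ripley       :", kest(data=z, radii=r, mode="ripley"))
    print("translation  :", kest(data=z, radii=r, mode="translation"))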
|
94dc9a77f74b0622c774b2b4f982c8fa86cfcf8ae956e489bb3725e141dddec7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import numpy as np
import astropy.units as u
from . import _stats
__all__ = [
"gaussian_fwhm_to_sigma",
"gaussian_sigma_to_fwhm",
"binom_conf_interval",
"binned_binom_proportion",
"poisson_conf_interval",
"median_absolute_deviation",
"mad_std",
"signal_to_noise_oir_ccd",
"bootstrap",
"kuiper",
"kuiper_two",
"kuiper_false_positive_probability",
"cdf_from_intervals",
"interval_overlap_length",
"histogram_intervals",
"fold_intervals",
]
__doctest_skip__ = ["binned_binom_proportion"]
__doctest_requires__ = {
"binom_conf_interval": ["scipy"],
"poisson_conf_interval": ["scipy"],
}
gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1.0 / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
"""
def binom_conf_interval(k, n, confidence_level=0.68269, interval="wilson"):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
confidence_level : float, optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
Confidence level must be in range [0, 1].
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (n) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/n)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2}
\pm \frac{\kappa n^{1/2}}{n + \kappa^2}
        \left(\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n)\right)^{1/2}
where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, n - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = n the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, n - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}}
The Wald interval gives acceptable results in some limiting
    cases, particularly when n is very large and the true proportion
    :math:`\epsilon` is not "too close" to 0 or 1. However, as the
    latter is not verifiable when trying to estimate :math:`\epsilon`,
this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
This function requires ``scipy`` for all interval types.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP
array([0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, n:
>>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP
array([[0.07921741, 0.21597328],
[0.42078276, 0.61736012]])
>>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP
array([[0.0842525 , 0.21789949],
[0.42218001, 0.61753691]])
>>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP
array([[0.12139799, 0.24309021],
[0.45401727, 0.61535699]])
In contrast, the Wald interval gives poor results for small k, n.
For k = 0 or k = n, the interval always has zero length.
>>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP
array([[0.02111437, 0.18091075],
[0.37888563, 0.61908925]])
For confidence intervals approaching 1, the Wald interval for
0 < k < n can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP
array([[-0.26077835, -0.16433593],
[ 0.66077835, 0.96433593]])
"""
if confidence_level < 0.0 or confidence_level > 1.0:
raise ValueError("confidence_level must be between 0. and 1.")
alpha = 1.0 - confidence_level
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError("n must be positive")
if (k < 0).any() or (k > n).any():
raise ValueError("k must be in {0, 1, .., n}")
if interval == "wilson" or interval == "wald":
from scipy.special import erfinv
kappa = np.sqrt(2.0) * min(erfinv(confidence_level), 1.0e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == "wilson":
midpoint = (k + kappa**2 / 2.0) / (n + kappa**2)
halflength = (
(kappa * np.sqrt(n))
/ (n + kappa**2)
* np.sqrt(p * (1 - p) + kappa**2 / (4 * n))
)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.0] = 0.0
conf_interval[conf_interval > 1.0] = 1.0
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1.0 - p) / n)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
elif interval == "jeffreys" or interval == "flat":
from scipy.special import betaincinv
if interval == "jeffreys":
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1.0 - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1.0 - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.0
elif k == n:
upperbound = 1.0
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError(f"Unrecognized interval: {interval:s}")
return conf_interval
def binned_binom_proportion(
x, success, bins=10, range=None, confidence_level=0.68269, interval="wilson"
):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : sequence
Values.
success : sequence of bool
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalar, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
confidence_level : float, optional
Must be in range [0, 1].
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
``bin_ctr + bins_halfwidth`` give the left and right side of each bin,
respectively.
p : ndarray
Efficiency in each bin.
perr : ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
Notes
-----
This function requires ``scipy`` for all interval types.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError("sizes of x and success must match")
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.0
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(
k, n, confidence_level=confidence_level, interval=interval
)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
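def _binned_binom_proportion_demo():
    # Illustrative sketch (editorial addition, not part of the public API):
    # recover a known step-like detection efficiency without plotting.
    # ``binned_binom_proportion`` still needs scipy for the interval itself.
    rng = np.random.default_rng(123)
    mag = 20.0 + 10.0 * rng.random(2000)
    true_eff = np.where(mag < 25.0, 0.9, 0.2)
    detected = rng.random(mag.size) < true_eff
    return binned_binom_proportion(mag, detected, bins=10)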
def _check_poisson_conf_inputs(sigma, background, confidence_level, name):
if sigma != 1:
raise ValueError(f"Only sigma=1 supported for interval {name}")
if background != 0:
raise ValueError(f"background not supported for interval {name}")
if confidence_level is not None:
raise ValueError(f"confidence_level not supported for interval {name}")
def poisson_conf_interval(
n, interval="root-n", sigma=1, background=0, confidence_level=None
):
r"""Poisson parameter confidence interval given observed counts.
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
confidence_level : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group `recommends
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also discusses several
possibilities but concludes that no single representation is
suitable for all cases. The suggestion has also been `floated
<https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error
bars should be attached to theoretical predictions instead of
observed data, which this function will not help with (but it's
easy; then you really should use the square root of the theoretical
prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as `explained
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by
the CDF working group). It also has the nice feature that if your
theory curve touches an endpoint of the interval, then your data
point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The `documentation
<https://cxc.harvard.edu/sherpa4.4/statistics/#chigehrels>`_ claims
it is based on a numerical approximation published in `Gehrels
(1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it
does not actually appear there. It is symmetrical, and while the
upper limits are within about 1% of those given by
'frequentist-confidence', the lower limits can be badly wrong. The
interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
`Maxwell (2011)
<https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further
details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See `Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further
details.
These formulas implement a positive, uniform prior.
`Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this
choice in more detail and show that the problem is relatively
insensitive to the choice of prior.
This function has an optional dependency: Either `Scipy
<https://www.scipy.org/>`_ or `mpmath <http://mpmath.org/>`_ need
to be available (Scipy works only for N < 100).
This code is very intense numerically, which makes it much slower than
the other methods, in particular for large count numbers (above 1000
even with ``mpmath``). Fortunately, some of the other methods or a
Gaussian approximation usually work well in this regime.
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(
... np.arange(10), interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(
... 7, interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(
... 10, background=1.5, confidence_level=0.95,
... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]])
"""
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == "root-n":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
elif interval == "root-n-0":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == "pearson":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array(
[n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25)]
)
elif interval == "sherpagehrels":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75), n + 1 + np.sqrt(n + 0.75)])
elif interval == "frequentist-confidence":
_check_poisson_conf_inputs(1.0, background, confidence_level, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array(
[
0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha),
]
)
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == "kraft-burrows-nousek":
# Deprecation warning in Python 3.9 when N is float, so we force int,
# see https://github.com/astropy/astropy/issues/10832
if np.isscalar(n):
if not isinstance(n, int):
raise TypeError("Number of counts must be integer.")
elif not issubclass(n.dtype.type, np.integer):
raise TypeError("Number of counts must be integer.")
if confidence_level is None:
raise ValueError(
f"Set confidence_level for method {interval}. (sigma is ignored.)"
)
confidence_level = np.asanyarray(confidence_level)
if np.any(confidence_level <= 0) or np.any(confidence_level >= 1):
raise ValueError("confidence_level must be a number between 0 and 1.")
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError("Background must be >= 0.")
conf_interval = np.vectorize(_kraft_burrows_nousek, cache=True)(
n, background, confidence_level
)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}")
return conf_interval
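def _poisson_conf_interval_demo():
    # Illustrative sketch (editorial addition, not part of the public API):
    # compare the closed-form intervals for small counts; modes that need
    # scipy or mpmath ('frequentist-confidence', 'kraft-burrows-nousek') are
    # omitted here.
    n = np.arange(5)
    return {
        name: poisson_conf_interval(n, interval=name)
        for name in ("root-n", "root-n-0", "pearson", "sherpagehrels")
    }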
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the MADs are computed. The default
(`None`) is to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.default_rng(12345)
>>> mad = median_absolute_deviation(rand.standard_normal(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.6829504282771885
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 why using np.ma.median
# for normal arrays should not be done (summary: np.ma.median always
# returns an masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median # drops units if result is NaN
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (
isinstance(data, u.Quantity)
and func is np.median
and data_median.ndim == 0
and np.isnan(data_median)
):
data_median = data.__array_wrap__(data_median)
# broadcast the median array before subtraction
if axis is not None:
data_median = np.expand_dims(data_median, axis=axis)
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (
isinstance(data, u.Quantity)
and func is np.median
and result.ndim == 0
and np.isnan(result)
):
result = data.__array_wrap__(result)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the robust standard deviations are
computed. The default (`None`) is to compute the robust
standard deviation of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.default_rng(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
1.984147963351707
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
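Examples
--------
A minimal usage sketch with made-up illustrative values (a 1000 s exposure,
1.5 e-/s from the source, modest sky and dark rates, 5 e- read noise, and a
20-pixel aperture):
>>> from astropy.stats import signal_to_noise_oir_ccd
>>> snr = signal_to_noise_oir_ccd(t=1000., source_eps=1.5, sky_eps=0.2,
...                               dark_eps=0.01, rd=5., npix=20.)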
"""
signal = t * source_eps * gain
noise = np.sqrt(
t * (source_eps * gain + npix * (sky_eps * gain + dark_eps)) + npix * rd**2
)
return signal / noise
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x: test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
def _scipy_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires :mod:`~scipy`. This implementation will raise an OverflowError for
roughly N > 100 (the exact limit depends on details of how scipy was
compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an
implementation that is slower, but can deal with arbitrarily high numbers
since it is based on the `mpmath <http://mpmath.org/>`_ library.
"""
from math import exp
from scipy.integrate import quad
from scipy.optimize import brentq
from scipy.special import factorial
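# eqn8 is the normalization constant of Kraft, Burrows & Nousek's posterior
# (their Eq. 8); eqn7 below is the normalized posterior density for the
# source intensity S given N observed counts and background B (their Eq. 7).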
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
return 1.0 / (exp(-B) * np.sum(np.power(B, n) / factorial(n)))
# The parameters of eqn8 do not vary between calls, so we can calculate the
# result once and reuse it. The same is true for the factorial of N.
# eqn7 is called hundreds of times, so "caching" these values yields a
# significant speedup (roughly a factor of 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching S_min such that eqn7(S_min) = eqn7(S_max).
"""
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.0
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
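# func(s) is the posterior probability contained in [find_s_min(s), s]
# minus the requested confidence level; brentq locates its root, i.e. S_max.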
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <http://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
"""
from mpmath import exp, factorial, findroot, fsum, mpf, power, quad
# We convert these values to float because, for some reason,
# mpmath.mpf cannot convert from numpy.int64.
N = mpf(float(N))
B = mpf(float(B))
CL = mpf(float(CL))
tol = 1e-4
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1.0 / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching S_min such that eqn7(S_min) = eqn7(S_max).
"""
y_S_max = eqn7(S_max, N, B)
# If B > N, then N-B, the "most probable" value, is < 0
# and thus s_min is certainly 0.
# Note: For small N, s_max is also close to 0 and root finding
# might find the wrong root, thus it is important to handle this
# case here and return the analytical answer (s_min = 0).
if (B >= N) or (eqn7(0, N, B) >= y_S_max):
return 0.0
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, [0.0, N - B], solver="ridder", tol=tol)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
# Several numerical problems were found that prevent the solvers from
# finding the roots unless the starting values are very close to the final
# values. Thus, we use this primitive, brute-force stepping to get an
# interval that can be fed into the ridder solver.
s_max_guess = max(N - B, 1.0)
while func(s_max_guess) < 0:
s_max_guess += 1
S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver="ridder", tol=tol)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
This function has an optional dependency: either :mod:`scipy` or `mpmath
<http://mpmath.org/>`_ needs to be available. (scipy only works for
N < 100.)
"""
from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError("Need mpmath package for input numbers this large.")
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError("Either scipy or mpmath is required.")
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.240, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
"""
try:
from scipy.special import comb, factorial
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import comb, factorial
if D < 0.0 or D > 2.0:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2.0 / N:
return 1.0 - factorial(N) * (D - 1.0 / N) ** (N - 1)
elif D < 3.0 / N:
k = -(N * D - 1.0) / 2.0
r = np.sqrt(k**2 - (N * D - 2.0) ** 2 / 2.0)
a, b = -k + r, -k - r
return 1 - (
factorial(N - 1)
* (b ** (N - 1) * (1 - a) - a ** (N - 1) * (1 - b))
/ N ** (N - 2)
/ (b - a)
)
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.0) / (2.0 * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y ** (t - 3) * (
y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2
)
term1 = comb(N, t)
term2 = (1 - D - t / N) ** (N - t - 1)
# term1 is formally finite, but is approximated by numpy as np.inf for
# large values, so we set them to zero manually when they would be
# multiplied by zero anyway
term1[(term1 == np.inf) & (term2 == 0)] = 0.0
final_term = Tt * term1 * term2
return final_term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (
ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)
).sum()
return S1 - 8 * D / 3 * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
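Examples
--------
A minimal usage sketch, testing uniformly distributed random data against
the default uniform CDF (requires `scipy` for the false positive
probability; the returned values depend on the random draw):
>>> import numpy as np
>>> from astropy.stats import kuiper
>>> rng = np.random.default_rng(0)
>>> D, fpp = kuiper(rng.uniform(0, 1, 100))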
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
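# Kuiper's statistic V = D+ + D-: the largest deviation of the empirical
# CDF above the model CDF plus the largest deviation below it.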
D = np.amax(cdfv - np.arange(N) / float(N)) + np.amax(
(np.arange(N) + 1) / float(N) - cdfv
)
return D, kuiper_false_positive_probability(D, N)
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
(n1,) = data1.shape
(n2,) = data2.shape
common_type = np.find_common_type([], [data1.dtype, data2.dtype])
if not (
np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)
):
raise ValueError("kuiper_two only accepts real inputs")
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError("kuiper_two only accepts non-nan inputs")
D = _stats.ks_2samp(np.asarray(data1, common_type), np.asarray(data2, common_type))
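# Effective sample size for the two-sample test: n1 * n2 / (n1 + n2).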
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of (3,) tuple
For each tuple (ai,bi,wi); ai and bi are the limits of the interval,
and wi is the weight to apply to the interval.
Returns
-------
breaks : (N,) array of float
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : (N-1,) array of float
The ith element is the total weight on the subinterval
(breaks[i], breaks[i+1]): for each input interval, the number of times
that subinterval is covered (modulo 1) multiplied by the interval's
weight, summed over all input intervals.
"""
r = []
breaks = set()
tot = 0
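# Each interval (a, b) spans ceil(b) - floor(a) copies of the unit interval
# once folded, so first credit that many copies of the weight everywhere,
# then subtract the weight again on [0, a mod 1) and [b mod 1, 1), the
# parts of the first and last copy that the interval does not actually
# reach.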
for a, b, wt in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.0)
breaks.add(1.0)
breaks = sorted(breaks)
breaks_map = {f: i for (i, f) in enumerate(breaks)}
totals = np.zeros(len(breaks) - 1)
totals += tot
for a, b, wt in r:
totals[breaks_map[a] : breaks_map[b]] += wt
return np.array(breaks), totals
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : (N,) array of float
The boundaries of successive intervals.
totals : (N-1,) array of float
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by ``breaks`` and
``totals``.
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError("Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : (float, float)
The two intervals, (interval 1, interval 2).
Returns
-------
l : float
The length of the overlap between the two intervals.
"""
(a, b) = i1
(c, d) = i2
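# Equivalent to max(0, min(b, d) - max(a, c)), written out case by case.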
if a < c:
if b < c:
return 0.0
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : (N,) array of float
Endpoints of the intervals in the PDF
totals : (N-1,) array of float
Probability densities in each bin
Returns
-------
h : array of float
The average weight for each bin
"""
h = np.zeros(n)
start = breaks[0]
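# For every input subinterval, add its weight to each histogram bin it
# overlaps, scaled by the overlapping fraction of the bin (ol / bin width).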
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n, float(j + 1) / n), (start, end))
h[j] += ol / (1.0 / n) * totals[i]
start = end
return h
|
43d698ae13b41173e91ca458e60bfa8c789523c8f24ecc348e9836af55e2b126 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for model selection.
"""
import numpy as np
__all__ = [
"bayesian_info_criterion",
"bayesian_info_criterion_lsq",
"akaike_info_criterion",
"akaike_info_criterion_lsq",
]
__doctest_requires__ = {
"bayesian_info_criterion_lsq": ["scipy"],
"akaike_info_criterion_lsq": ["scipy"],
}
def bayesian_info_criterion(log_likelihood, n_params, n_samples):
r"""Computes the Bayesian Information Criterion (BIC) given the log of the
likelihood function evaluated at the estimated (or analytically derived)
parameters, the number of parameters, and the number of samples.
The BIC is usually applied to decide whether increasing the number of free
parameters (hence, increasing the model complexity) yields significantly
better fittings. The decision is in favor of the model with the lowest
BIC.
BIC is given as
.. math::
\mathrm{BIC} = k \ln(n) - 2L,
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
When comparing two models, define
:math:`\Delta \mathrm{BIC} = \mathrm{BIC}_h - \mathrm{BIC}_l`, in which
:math:`\mathrm{BIC}_h` is the higher BIC, and :math:`\mathrm{BIC}_l` is
the lower BIC. The larger :math:`\Delta \mathrm{BIC}`, the stronger the
evidence against the model with the higher BIC.
The general rule of thumb is:
:math:`0 < \Delta\mathrm{BIC} \leq 2`: weak evidence that model low is
better
:math:`2 < \Delta\mathrm{BIC} \leq 6`: moderate evidence that model low is
better
:math:`6 < \Delta\mathrm{BIC} \leq 10`: strong evidence that model low is
better
:math:`\Delta\mathrm{BIC} > 10`: very strong evidence that model low is
better
For a detailed explanation, see [1]_ - [5]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Bayesian Information Criterion.
Examples
--------
The following example was originally presented in [1]_. Consider a
Gaussian model (mu, sigma) and a t-Student model (mu, sigma, delta).
In addition, assume that the t model yields a higher likelihood.
The question that the BIC is meant to answer is: "Is the increase in
likelihood due to the larger number of parameters?"
>>> from astropy.stats.info_theory import bayesian_info_criterion
>>> lnL_g = -176.4
>>> lnL_t = -173.0
>>> n_params_g = 2
>>> n_params_t = 3
>>> n_samples = 100
>>> bic_g = bayesian_info_criterion(lnL_g, n_params_g, n_samples)
>>> bic_t = bayesian_info_criterion(lnL_t, n_params_t, n_samples)
>>> bic_g - bic_t # doctest: +FLOAT_CMP
2.1948298140119391
Therefore, there is moderate evidence that the increase in likelihood
for the t-Student model is due to its larger number of parameters.
References
----------
.. [1] Richards, D. Maximum Likelihood Estimation and the Bayesian
Information Criterion.
<https://hea-www.harvard.edu/astrostat/Stat310_0910/dr_20100323_mle.pdf>
.. [2] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [3] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [4] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [5] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
return n_params * np.log(n_samples) - 2.0 * log_likelihood
# NOTE: bic_t - bic_g doctest is skipped because it produced slightly
# different results on arm64 and big-endian s390x CI jobs.
def bayesian_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Bayesian Information Criterion (BIC) assuming that the
observations come from a Gaussian distribution.
In this case, BIC is given as
.. math::
\mathrm{BIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + k\ln(n)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic. See [1]_ and [2]_.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Examples
--------
Consider the simple 1-D fitting example presented in the Astropy
modeling webpage [3]_. There, two models (Box and Gaussian) were fitted to
a source flux using the least squares statistic. However, the fits
themselves do not tell us much about which model better represents this
hypothetical source. Therefore, we apply the BIC in order to decide
in favor of one model.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import bayesian_info_criterion_lsq
>>> # Generate fake data
>>> np.random.seed(0)
>>> x = np.linspace(-5., 5., 200)
>>> y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
>>> y += np.random.normal(0., 0.2, x.shape)
>>> # Fit the data using a Box model.
>>> # Bounds are not really needed but included here to demonstrate usage.
>>> t_init = models.Trapezoid1D(amplitude=1., x_0=0., width=1., slope=0.5,
... bounds={"x_0": (-5., 5.)})
>>> fit_t = fitting.LevMarLSQFitter()
>>> t = fit_t(t_init, x, y)
>>> # Fit the data using a Gaussian
>>> g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
>>> fit_g = fitting.LevMarLSQFitter()
>>> g = fit_g(g_init, x, y)
>>> # Compute the mean squared errors
>>> ssr_t = np.sum((t(x) - y)*(t(x) - y))
>>> ssr_g = np.sum((g(x) - y)*(g(x) - y))
>>> # Compute the bics
>>> bic_t = bayesian_info_criterion_lsq(ssr_t, 4, x.shape[0])
>>> bic_g = bayesian_info_criterion_lsq(ssr_g, 3, x.shape[0])
>>> bic_t - bic_g # doctest: +SKIP
30.644474706065466
Hence, there is very strong evidence that the Gaussian model provides a
significantly better representation of the data than the Box model. This
is, of course, expected since the true model is Gaussian.
References
----------
.. [1] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [3] Astropy Models and Fitting
<https://docs.astropy.org/en/stable/modeling>
"""
return bayesian_info_criterion(
-0.5 * n_samples * np.log(ssr / n_samples), n_params, n_samples
)
def akaike_info_criterion(log_likelihood, n_params, n_samples):
r"""
Computes the Akaike Information Criterion (AIC).
Like the Bayesian Information Criterion, the AIC is a measure of
relative fitting quality which is used for fitting evaluation and model
selection. The decision is in favor of the model with the lowest AIC.
AIC is given as
.. math::
\mathrm{AIC} = 2(k - L)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
In case the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = 2(k - L) + \dfrac{2k(k+1)}{n - k - 1}
Rule of thumb [1]_:
:math:`\Delta\mathrm{AIC}_i = \mathrm{AIC}_i - \mathrm{AIC}_{min}`
:math:`\Delta\mathrm{AIC}_i < 2`: substantial support for model i
:math:`3 < \Delta\mathrm{AIC}_i < 7`: considerably less support for model i
:math:`\Delta\mathrm{AIC}_i > 10`: essentially no support for model i
in which :math:`\mathrm{AIC}_{min}` stands for the lower AIC among the
models which are being compared.
For detailed explanations see [1]_-[6]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
The following example was originally presented in [2]_. Two models are
being compared: one with six parameters (model 1) and another with five
parameters (model 2). Despite the fact that model 2 has a lower AIC, we
could decide in favor of model 1 since the difference (in AIC) between
them is only about 1.0.
>>> n_samples = 121
>>> lnL1 = -3.54
>>> n1_params = 6
>>> lnL2 = -4.17
>>> n2_params = 5
>>> aic1 = akaike_info_criterion(lnL1, n1_params, n_samples)
>>> aic2 = akaike_info_criterion(lnL2, n2_params, n_samples)
>>> aic1 - aic2 # doctest: +FLOAT_CMP
0.9551029748283746
Therefore, model 1 is still substantially supported, even though it has
more free parameters.
References
----------
.. [1] Cavanaugh, J. E. Model Selection Lecture II: The Akaike
Information Criterion.
<http://machinelearning102.pbworks.com/w/file/fetch/47699383/ms_lec_2_ho.pdf>
.. [2] Mazerolle, M. J. Making sense out of Akaike's Information
Criterion (AIC): its use and interpretation in model selection and
inference from ecological data.
.. [3] Wikipedia. Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [4] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [5] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [6] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
# Correction in case of small number of observations
if n_samples / float(n_params) >= 40.0:
aic = 2.0 * (n_params - log_likelihood)
else:
aic = 2.0 * (n_params - log_likelihood) + 2.0 * n_params * (n_params + 1.0) / (
n_samples - n_params - 1.0
)
return aic
def akaike_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Akaike Information Criterion assuming that the observations
are Gaussian distributed.
In this case, AIC is given as
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k
In case the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k +
\dfrac{2k(k+1)}{n-k-1}
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., the dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
This example is based on Astropy Modeling webpage, Compound models
section.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import akaike_info_criterion_lsq
>>> np.random.seed(42)
>>> # Generate fake data
>>> g1 = models.Gaussian1D(.1, 0, 0.2) # changed this to noise level
>>> g2 = models.Gaussian1D(.1, 0.3, 0.2) # and added another Gaussian
>>> g3 = models.Gaussian1D(2.5, 0.5, 0.1)
>>> x = np.linspace(-1, 1, 200)
>>> y = g1(x) + g2(x) + g3(x) + np.random.normal(0., 0.2, x.shape)
>>> # Fit with three Gaussians
>>> g3_init = (models.Gaussian1D(.1, 0, 0.1)
... + models.Gaussian1D(.1, 0.2, 0.15)
... + models.Gaussian1D(2.4, .4, 0.1))
>>> fitter = fitting.LevMarLSQFitter()
>>> g3_fit = fitter(g3_init, x, y)
>>> # Fit with two Gaussians
>>> g2_init = (models.Gaussian1D(.1, 0, 0.1) +
... models.Gaussian1D(2, 0.5, 0.1))
>>> g2_fit = fitter(g2_init, x, y)
>>> # Fit with only one Gaussian
>>> g1_init = models.Gaussian1D(amplitude=2., mean=0.3, stddev=.5)
>>> g1_fit = fitter(g1_init, x, y)
>>> # Compute the mean squared errors
>>> ssr_g3 = np.sum((g3_fit(x) - y)**2.0)
>>> ssr_g2 = np.sum((g2_fit(x) - y)**2.0)
>>> ssr_g1 = np.sum((g1_fit(x) - y)**2.0)
>>> akaike_info_criterion_lsq(ssr_g3, 9, x.shape[0]) # doctest: +FLOAT_CMP
-634.5257517810961
>>> akaike_info_criterion_lsq(ssr_g2, 6, x.shape[0]) # doctest: +FLOAT_CMP
-662.83834510232043
>>> akaike_info_criterion_lsq(ssr_g1, 3, x.shape[0]) # doctest: +FLOAT_CMP
-647.47312032659499
Hence, from the AIC values, we would prefer the model g2_fit. We should
reject the models g3_fit and g1_fit, since their AIC differences relative
to g2_fit are about 28 and 15, respectively.
References
----------
.. [1] Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
"""
return akaike_info_criterion(
-0.5 * n_samples * np.log(ssr / n_samples), n_params, n_samples
)
|
e70b1e6d07d7f652ae5faa70db39293a0b149c7f3b6a72ef34d34daecde79ac3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions for computing robust statistics using
Tukey's biweight function.
"""
import numpy as np
from .funcs import median_absolute_deviation
__all__ = [
"biweight_location",
"biweight_scale",
"biweight_midvariance",
"biweight_midcovariance",
"biweight_midcorrelation",
]
def _stat_functions(data, ignore_nan=False):
if isinstance(data, np.ma.MaskedArray):
median_func = np.ma.median
sum_func = np.ma.sum
elif ignore_nan:
median_func = np.nanmedian
sum_func = np.nansum
else:
median_func = np.median
sum_func = np.sum
return median_func, sum_func
def biweight_location(data, c=6.0, M=None, axis=None, *, ignore_nan=False):
r"""
Compute the biweight location.
The biweight location is a robust statistic for determining the
central location of a distribution. It is given by:
.. math::
\zeta_{biloc}= M + \frac{\sum_{|u_i|<1} \ (x_i - M) (1 - u_i^2)^2}
{\sum_{|u_i|<1} \ (1 - u_i^2)^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input initial location guess) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight location tuning constant ``c`` is typically 6.0 (the
default).
If :math:`MAD` is zero, then the median will be returned.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 6.0).
M : float or array-like, optional
Initial guess for the location. If ``M`` is a scalar value,
then its value will be used for the entire array (or along each
``axis``, if specified). If ``M`` is an array, then it must be
an array containing the initial location estimate along each
``axis`` of the input array. If `None` (default), then the
median of the input array will be used (or along each ``axis``,
if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight locations are
computed. If `None` (default), then the biweight location of
the flattened input array will be computed.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_location : float or `~numpy.ndarray`
The biweight location of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwloc.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight location of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_location
>>> rand = np.random.default_rng(12345)
>>> biloc = biweight_location(rand.standard_normal(1000))
>>> print(biloc) # doctest: +FLOAT_CMP
0.01535330525461019
"""
median_func, sum_func = _stat_functions(data, ignore_nan=ignore_nan)
if isinstance(data, np.ma.MaskedArray) and ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
data = np.asanyarray(data).astype(np.float64)
if M is None:
M = median_func(data, axis=axis)
if axis is not None:
M = np.expand_dims(M, axis=axis)
# set up the differences
d = data - M
# set up the weighting
mad = median_absolute_deviation(data, axis=axis, ignore_nan=ignore_nan)
# mad = 0 means data is constant or mostly constant
# mad = np.nan means data contains NaNs and ignore_nan=False
if axis is None and (mad == 0.0 or np.isnan(mad)):
return M
if axis is not None:
mad = np.expand_dims(mad, axis=axis)
with np.errstate(divide="ignore", invalid="ignore"):
u = d / (c * mad)
# now remove the outlier points
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
mask = np.abs(u) >= 1
u = (1 - u**2) ** 2
u[mask] = 0
# If mad == 0 along the specified ``axis`` in the input data, return
# the median value along that axis.
# Ignore RuntimeWarnings for divide by zero
with np.errstate(divide="ignore", invalid="ignore"):
value = M.squeeze() + (sum_func(d * u, axis=axis) / sum_func(u, axis=axis))
if np.isscalar(value):
return value
where_func = np.where
if isinstance(data, np.ma.MaskedArray):
where_func = np.ma.where # return MaskedArray
return where_func(mad.squeeze() == 0, M.squeeze(), value)
def biweight_scale(
data, c=9.0, M=None, axis=None, modify_sample_size=False, *, ignore_nan=False
):
r"""
Compute the biweight scale.
The biweight scale is a robust statistic for determining the
standard deviation of a distribution. It is the square root of the
`biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_.
It is given by:
.. math::
\zeta_{biscl} = \sqrt{n} \ \frac{\sqrt{\sum_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4}} {|(\sum_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))|}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
If :math:`MAD` is zero, then zero will be returned.
For the standard definition of biweight scale, :math:`n` is the
total number of points in the array (or along the input ``axis``, if
specified). That definition is used if ``modify_sample_size`` is
`False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \sum_{|u_i| < 1} \ 1
which results in a value closer to the true standard deviation for
small sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight scales are computed.
If `None` (default), then the biweight scale of the flattened
input array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
scale. If `True`, then the sample size is reduced to correct
for any rejected values (i.e. the sample size used includes only
the non-rejected values), which results in a value closer to the
true standard deviation for small sample sizes or for a large
number of rejected values.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_scale : float or `~numpy.ndarray`
The biweight scale of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_midvariance, biweight_midcovariance, biweight_location, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwscale.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight scale of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_scale
>>> rand = np.random.default_rng(12345)
>>> biscl = biweight_scale(rand.standard_normal(1000))
>>> print(biscl) # doctest: +FLOAT_CMP
1.0239311812635818
"""
return np.sqrt(
biweight_midvariance(
data,
c=c,
M=M,
axis=axis,
modify_sample_size=modify_sample_size,
ignore_nan=ignore_nan,
)
)
def biweight_midvariance(
data, c=9.0, M=None, axis=None, modify_sample_size=False, *, ignore_nan=False
):
r"""
Compute the biweight midvariance.
The biweight midvariance is a robust statistic for determining the
variance of a distribution. Its square root is a robust estimator
of scale (i.e. standard deviation). It is given by:
.. math::
\zeta_{bivar} = n \ \frac{\sum_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4} {(\sum_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
If :math:`MAD` is zero, then zero will be returned.
For the standard definition of `biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_,
:math:`n` is the total number of points in the array (or along the
input ``axis``, if specified). That definition is used if
``modify_sample_size`` is `False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \sum_{|u_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight midvariances are
computed. If `None` (default), then the biweight midvariance of
the flattened input array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midvariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true variance for small sample sizes or for a
large number of rejected values.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_midvariance : float or `~numpy.ndarray`
The biweight midvariance of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
See Also
--------
biweight_midcovariance, biweight_midcorrelation, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance
.. [2] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight midvariance of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_midvariance
>>> rand = np.random.default_rng(12345)
>>> bivar = biweight_midvariance(rand.standard_normal(1000))
>>> print(bivar) # doctest: +FLOAT_CMP
1.0484350639638342
"""
median_func, sum_func = _stat_functions(data, ignore_nan=ignore_nan)
if isinstance(data, np.ma.MaskedArray) and ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
data = np.asanyarray(data).astype(np.float64)
if M is None:
M = median_func(data, axis=axis)
if axis is not None:
M = np.expand_dims(M, axis=axis)
# set up the differences
d = data - M
# set up the weighting
mad = median_absolute_deviation(data, axis=axis, ignore_nan=ignore_nan)
if axis is None:
# data is constant or mostly constant OR
# data contains NaNs and ignore_nan=False
if mad == 0.0 or np.isnan(mad):
return mad**2 # variance units
else:
mad = np.expand_dims(mad, axis=axis)
with np.errstate(divide="ignore", invalid="ignore"):
u = d / (c * mad)
# now remove the outlier points
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
mask = np.abs(u) < 1
if isinstance(mask, np.ma.MaskedArray):
mask = mask.filled(fill_value=False) # exclude masked data values
u = u**2
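# From here on ``u`` holds u_i**2, so (1 - u)**4 and (1 - 5*u) below are
# the (1 - u_i**2)**4 and (1 - 5*u_i**2) factors of the midvariance
# formula.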
if modify_sample_size:
n = sum_func(mask, axis=axis)
else:
# set good values to 1, bad values to 0
include_mask = np.ones(data.shape)
if isinstance(data, np.ma.MaskedArray):
include_mask[data.mask] = 0
if ignore_nan:
include_mask[np.isnan(data)] = 0
n = np.sum(include_mask, axis=axis)
f1 = d * d * (1.0 - u) ** 4
f1[~mask] = 0.0
f1 = sum_func(f1, axis=axis)
f2 = (1.0 - u) * (1.0 - 5.0 * u)
f2[~mask] = 0.0
f2 = np.abs(np.sum(f2, axis=axis)) ** 2
# If mad == 0 along the specified ``axis`` in the input data, return
# 0.0 along that axis.
# Ignore RuntimeWarnings for divide by zero.
with np.errstate(divide="ignore", invalid="ignore"):
value = n * f1 / f2
if np.isscalar(value):
return value
where_func = np.where
if isinstance(data, np.ma.MaskedArray):
where_func = np.ma.where # return MaskedArray
return where_func(mad.squeeze() == 0, 0.0, value)
def biweight_midcovariance(data, c=9.0, M=None, modify_sample_size=False):
r"""
Compute the biweight midcovariance between pairs of multiple
variables.
The biweight midcovariance is a robust and resistant estimator of
the covariance between two variables.
This function computes the biweight midcovariance between all pairs
of the input variables (rows) in the input data. The output array
will have a shape of (N_variables, N_variables). The diagonal
elements will be the biweight midvariances of each input variable
(see :func:`biweight_midvariance`). The off-diagonal elements will
be the biweight midcovariances between each pair of input variables.
For example, if the input array ``data`` contains three variables
(rows) ``x``, ``y``, and ``z``, the output `~numpy.ndarray`
midcovariance matrix will be:
.. math::
\begin{pmatrix}
\zeta_{xx} & \zeta_{xy} & \zeta_{xz} \\
\zeta_{yx} & \zeta_{yy} & \zeta_{yz} \\
\zeta_{zx} & \zeta_{zy} & \zeta_{zz}
\end{pmatrix}
where :math:`\zeta_{xx}`, :math:`\zeta_{yy}`, and :math:`\zeta_{zz}`
are the biweight midvariances of each variable. The biweight
midcovariance between :math:`x` and :math:`y` is :math:`\zeta_{xy}`
(:math:`= \zeta_{yx}`). The biweight midcovariance between
:math:`x` and :math:`z` is :math:`\zeta_{xz}` (:math:`=
\zeta_{zx}`). The biweight midcovariance between :math:`y` and
:math:`z` is :math:`\zeta_{yz}` (:math:`= \zeta_{zy}`).
The biweight midcovariance between two variables :math:`x` and
:math:`y` is given by:
.. math::
\zeta_{xy} = n_{xy} \ \frac{\sum_{|u_i| < 1, \ |v_i| < 1} \
(x_i - M_x) (1 - u_i^2)^2 (y_i - M_y) (1 - v_i^2)^2}
{(\sum_{|u_i| < 1} \ (1 - u_i^2) (1 - 5u_i^2))
(\sum_{|v_i| < 1} \ (1 - v_i^2) (1 - 5v_i^2))}
where :math:`M_x` and :math:`M_y` are the medians (or the input
locations) of the two variables and :math:`u_i` and :math:`v_i` are
given by:
.. math::
u_{i} = \frac{(x_i - M_x)}{c * MAD_x}
v_{i} = \frac{(y_i - M_y)}{c * MAD_y}
where :math:`c` is the biweight tuning constant and :math:`MAD_x`
and :math:`MAD_y` are the `median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ of the
:math:`x` and :math:`y` variables. The biweight midvariance tuning
constant ``c`` is typically 9.0 (the default).
If :math:`MAD_x` or :math:`MAD_y` are zero, then zero will be
returned for that element.
For the standard definition of biweight midcovariance,
:math:`n_{xy}` is the total number of observations of each variable.
That definition is used if ``modify_sample_size`` is `False`, which
is the default.
However, if ``modify_sample_size = True``, then :math:`n_{xy}` is the
number of observations for which :math:`|u_i| < 1` and/or :math:`|v_i|
< 1`, i.e.
.. math::
n_{xx} = \sum_{|u_i| < 1} \ 1
.. math::
n_{xy} = n_{yx} = \sum_{|u_i| < 1, \ |v_i| < 1} \ 1
.. math::
n_{yy} = \sum_{|v_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : 2D or 1D array-like
Input data either as a 2D or 1D array. For a 2D array, it
should have a shape (N_variables, N_observations). A 1D array
may be input for observations of a single variable, in which
case the biweight midvariance will be calculated (no
covariance). Each row of ``data`` represents a variable, and
each column a single observation of all those variables (same as
the `numpy.cov` convention).
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or 1D array-like, optional
The location estimate of each variable, either as a scalar or
array. If ``M`` is an array, then it must be a 1D array
containing the location estimate of each variable (i.e. one
element per row). If ``M`` is a scalar value, then its value will be
used for each variable (row). If `None` (default), then the
median of each variable (row) will be used.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of observations of each variable, which follows the
standard definition of biweight midcovariance. If `True`, then
the sample size is reduced to correct for any rejected values
(see formula above), which results in a value closer to the true
covariance for small sample sizes or for a large number of
rejected values.
Returns
-------
biweight_midcovariance : ndarray
A 2D array representing the biweight midcovariances between each
pair of the variables (rows) in the input array. The output
array will have a shape of (N_variables, N_variables). The
diagonal elements will be the biweight midvariances of each
input variable. The off-diagonal elements will be the biweight
midcovariances between each pair of input variables.
See Also
--------
biweight_midvariance, biweight_midcorrelation, biweight_scale, biweight_location
References
----------
.. [1] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwmidc.htm
Examples
--------
Compute the biweight midcovariance between two random variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcovariance
>>> # Generate two random variables x and y
>>> rng = np.random.default_rng(1)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> # Calculate the biweight midcovariances between x and y
>>> bicov = biweight_midcovariance([x, y])
>>> print(bicov) # doctest: +FLOAT_CMP
[[0.83435568 0.02379316]
[0.02379316 7.15665769]]
>>> # Print standard deviation estimates
>>> print(np.sqrt(bicov.diagonal())) # doctest: +FLOAT_CMP
[0.91343072 2.67519302]
"""
data = np.asanyarray(data).astype(np.float64)
# ensure data is 2D
if data.ndim == 1:
data = data[np.newaxis, :]
if data.ndim != 2:
raise ValueError("The input array must be 2D or 1D.")
# estimate location if not given
if M is None:
M = np.median(data, axis=1)
M = np.asanyarray(M)
if M.ndim > 1:
raise ValueError("M must be a scalar or 1D array.")
# set up the differences
d = (data.T - M).T
# set up the weighting
mad = median_absolute_deviation(data, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
u = (d.T / (c * mad)).T
# now remove the outlier points
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
mask = np.abs(u) < 1
u = u**2
if modify_sample_size:
maskf = mask.astype(float)
n = np.inner(maskf, maskf)
else:
n = data[0].size
usub1 = 1.0 - u
usub5 = 1.0 - 5.0 * u
usub1[~mask] = 0.0
with np.errstate(divide="ignore", invalid="ignore"):
numerator = d * usub1**2
denominator = (usub1 * usub5).sum(axis=1)[:, np.newaxis]
numerator_matrix = np.dot(numerator, numerator.T)
denominator_matrix = np.dot(denominator, denominator.T)
value = n * (numerator_matrix / denominator_matrix)
idx = np.where(mad == 0)[0]
value[idx, :] = 0
value[:, idx] = 0
return value
def biweight_midcorrelation(x, y, c=9.0, M=None, modify_sample_size=False):
r"""
Compute the biweight midcorrelation between two variables.
The `biweight midcorrelation
<https://en.wikipedia.org/wiki/Biweight_midcorrelation>`_ is a
measure of similarity between samples. It is given by:
.. math::
r_{bicorr} = \frac{\zeta_{xy}}{\sqrt{\zeta_{xx} \ \zeta_{yy}}}
where :math:`\zeta_{xx}` is the biweight midvariance of :math:`x`,
:math:`\zeta_{yy}` is the biweight midvariance of :math:`y`, and
:math:`\zeta_{xy}` is the biweight midcovariance of :math:`x` and
:math:`y`.
Parameters
----------
x, y : 1D array-like
Input arrays for the two variables. ``x`` and ``y`` must be 1D
arrays and have the same number of elements.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0). See
`biweight_midcovariance` for more details.
M : float or array-like, optional
The location estimate for ``x`` and ``y``. If ``M`` is a scalar
value, then the same value will be used for both variables. If
``M`` is array-like, then it must contain one location estimate
per variable. If `None` (default), then the median of each input
array will be used. See `biweight_midcovariance` for more details.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in each input array, which follows the standard definition of biweight
midcovariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true midcovariance for small sample sizes or for a
large number of rejected values. See `biweight_midcovariance`
for more details.
Returns
-------
biweight_midcorrelation : float
The biweight midcorrelation between ``x`` and ``y``.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance, biweight_location
References
----------
.. [1] https://en.wikipedia.org/wiki/Biweight_midcorrelation
Examples
--------
Calculate the biweight midcorrelation between two variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcorrelation
>>> rng = np.random.default_rng(12345)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> bicorr = biweight_midcorrelation(x, y)
>>> print(bicorr) # doctest: +FLOAT_CMP
-0.09203238319481295
"""
x = np.asanyarray(x)
y = np.asanyarray(y)
if x.ndim != 1:
raise ValueError("x must be a 1D array.")
if y.ndim != 1:
raise ValueError("y must be a 1D array.")
if x.shape != y.shape:
raise ValueError("x and y must have the same shape.")
bicorr = biweight_midcovariance(
[x, y], c=c, M=M, modify_sample_size=modify_sample_size
)
return bicorr[0, 1] / (np.sqrt(bicorr[0, 0] * bicorr[1, 1]))
|
7fc21e960b7d33e7f22d8c34b1d64fb76a9a9723c27ece8af59b3af6c5e3a009 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Methods for selecting the bin width of histograms.
Ported from the astroML project: https://www.astroml.org/
"""
import numpy as np
from .bayesian_blocks import bayesian_blocks
__all__ = [
"histogram",
"scott_bin_width",
"freedman_bin_width",
"knuth_bin_width",
"calculate_bin_edges",
]
def calculate_bin_edges(a, bins=10, range=None, weights=None):
"""
Calculate histogram bin edges like ``numpy.histogram_bin_edges``.
Parameters
----------
a : array-like
Input data. The bin edges are calculated over the flattened array.
bins : int, list, or str, optional
If ``bins`` is an int, it is the number of bins. If it is a list
it is taken to be the bin edges. If it is a string, it must be one
of 'blocks', 'knuth', 'scott' or 'freedman'. See
`~astropy.stats.histogram` for a description of each method.
range : tuple or None, optional
The minimum and maximum range for the histogram. If not specified,
it will be (a.min(), a.max()). However, if bins is a list it is
returned unmodified regardless of the range argument.
weights : array-like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges, though weights may be used in the future as new methods are added.
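Examples
--------
A brief usage sketch (``calculate_bin_edges`` is assumed importable from
``astropy.stats``, as it is listed in this module's ``__all__``); the edge
values themselves depend on the input data, so only the dimensionality of
the result is checked here:

>>> import numpy as np
>>> from astropy.stats import calculate_bin_edges
>>> rng = np.random.default_rng(42)
>>> data = rng.normal(size=1000)
>>> edges = calculate_bin_edges(data, bins="freedman")
>>> edges.ndim
1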
"""
# if range is specified, we need to truncate the data for
# the bin-finding routines
if range is not None:
a = a[(a >= range[0]) & (a <= range[1])]
# if bins is a string, first compute bin edges with the desired heuristic
if isinstance(bins, str):
a = np.asarray(a).ravel()
# TODO: if weights is specified, we need to modify things.
# e.g. we could use point measures fitness for Bayesian blocks
if weights is not None:
raise NotImplementedError(
"weights are not yet supported for the enhanced histogram"
)
if bins == "blocks":
bins = bayesian_blocks(a)
elif bins == "knuth":
da, bins = knuth_bin_width(a, True)
elif bins == "scott":
da, bins = scott_bin_width(a, True)
elif bins == "freedman":
da, bins = freedman_bin_width(a, True)
else:
raise ValueError(f"unrecognized bin code: '{bins}'")
if range:
# Check that the upper and lower edges are what was requested.
# The current implementation of the bin width estimators does not
# guarantee this, it only ensures that data outside the range is
# excluded from calculation of the bin widths.
if bins[0] != range[0]:
bins[0] = range[0]
if bins[-1] != range[1]:
bins[-1] = range[1]
elif np.ndim(bins) == 0:
# Number of bins was given
bins = np.histogram_bin_edges(a, bins, range=range, weights=weights)
return bins
def histogram(a, bins=10, range=None, weights=None, **kwargs):
"""Enhanced histogram function, providing adaptive binnings.
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specifying how bins are computed, the parameters are the same
as ``numpy.histogram()``.
Parameters
----------
a : array-like
array of data to be histogrammed
bins : int, list, or str, optional
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None, optional
the minimum and maximum range for the histogram. If not specified,
it will be (a.min(), a.max())
weights : array-like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges.
**kwargs : dict, optional
    Any other keyword arguments are passed on to `numpy.histogram`.
Returns
-------
hist : array
The values of the histogram. See ``density`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
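Examples
--------
A brief usage sketch; the counts and edges depend on the data, so only a
structural property (``len(bin_edges) == len(hist) + 1``) is shown:

>>> import numpy as np
>>> from astropy.stats import histogram
>>> rng = np.random.default_rng(0)
>>> data = rng.normal(size=1000)
>>> hist, bin_edges = histogram(data, bins="scott")
>>> len(bin_edges) == len(hist) + 1
True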
"""
bins = calculate_bin_edges(a, bins=bins, range=range, weights=weights)
# Now we call numpy's histogram with the resulting bin edges
return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs)
def scott_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using Scott's rule.
Scott's rule is a normal reference rule: it minimizes the integrated
mean squared error in the bin approximation under the assumption that the
data is approximately Gaussian.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using Scott's rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{3.5\sigma}{n^{1/3}}
where :math:`\sigma` is the standard deviation of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] Scott, David W. (1979). "On optimal and data-based histograms".
Biometrika 66 (3): 605-610
See Also
--------
knuth_bin_width
freedman_bin_width
bayesian_blocks
histogram
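Examples
--------
A minimal sketch of typical usage; the width depends on the spread of the
data, so no numerical output is shown:

>>> import numpy as np
>>> from astropy.stats import scott_bin_width
>>> rng = np.random.default_rng(1)
>>> data = rng.normal(size=1000)
>>> dx = scott_bin_width(data)
>>> dx, bins = scott_bin_width(data, return_bins=True)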
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
sigma = np.std(data)
dx = 3.5 * sigma / (n ** (1 / 3))
if return_bins:
Nbins = np.ceil((data.max() - data.min()) / dx)
Nbins = max(1, Nbins)
bins = data.min() + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
def freedman_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using the Freedman-Diaconis rule.
The Freedman-Diaconis rule is a normal reference rule like Scott's
rule, but uses rank-based statistics for results which are more robust
to deviations from a normal distribution.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using the Freedman-Diaconis rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}
where :math:`q_{N}` is the :math:`N`-th percentile of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] D. Freedman & P. Diaconis (1981)
"On the histogram as a density estimator: L2 theory".
Probability Theory and Related Fields 57 (4): 453-476
See Also
--------
knuth_bin_width
scott_bin_width
bayesian_blocks
histogram
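Examples
--------
A minimal sketch of typical usage (the data must contain at least four
entries); the width depends on the interquartile range, so no numerical
output is shown:

>>> import numpy as np
>>> from astropy.stats import freedman_bin_width
>>> rng = np.random.default_rng(2)
>>> data = rng.normal(size=1000)
>>> dx = freedman_bin_width(data)
>>> dx, bins = freedman_bin_width(data, return_bins=True)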
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
if n < 4:
raise ValueError("data should have more than three entries")
v25, v75 = np.percentile(data, [25, 75])
dx = 2 * (v75 - v25) / (n ** (1 / 3))
if return_bins:
dmin, dmax = data.min(), data.max()
Nbins = max(1, np.ceil((dmax - dmin) / dx))
try:
bins = dmin + dx * np.arange(Nbins + 1)
except ValueError as e:
if "Maximum allowed size exceeded" in str(e):
raise ValueError(
"The inter-quartile range of the data is too small: "
f"failed to construct histogram with {Nbins + 1} bins. "
"Please use another bin method, such as "
'bins="scott"'
)
else: # Something else # pragma: no cover
raise
return dx, bins
else:
return dx
def knuth_bin_width(data, return_bins=False, quiet=True):
r"""Return the optimal histogram bin width using Knuth's rule.
Knuth's rule is a fixed-width, Bayesian approach to determining
the optimal bin width of a histogram.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
quiet : bool, optional
if True (default) then suppress stdout output from scipy.optimize
Returns
-------
dx : float
optimal bin width. Bins are measured starting at the first data point.
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal number of bins is the value M which maximizes the function
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`
[1]_.
References
----------
.. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
arXiv:physics/0605197, 2006
See Also
--------
freedman_bin_width
scott_bin_width
bayesian_blocks
histogram
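Examples
--------
A minimal sketch of typical usage; SciPy is required, so the doctest is
skipped, and the optimal width depends on the input data:

>>> import numpy as np
>>> from astropy.stats import knuth_bin_width
>>> rng = np.random.default_rng(3)
>>> data = rng.normal(size=1000)
>>> dx, bins = knuth_bin_width(data, return_bins=True)  # doctest: +SKIP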
"""
# import here because of optional scipy dependency
from scipy import optimize
knuthF = _KnuthF(data)
dx0, bins0 = freedman_bin_width(data, True)
M = optimize.fmin(knuthF, len(bins0), disp=not quiet)[0]
bins = knuthF.bins(M)
dx = bins[1] - bins[0]
if return_bins:
return dx, bins
else:
return dx
class _KnuthF:
r"""Class which implements the function minimized by knuth_bin_width.
Parameters
----------
data : array-like, one dimension
data to be histogrammed
Notes
-----
the function F is given by
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
See Also
--------
knuth_bin_width
"""
def __init__(self, data):
self.data = np.array(data, copy=True)
if self.data.ndim != 1:
raise ValueError("data should be 1-dimensional")
self.data.sort()
self.n = self.data.size
# import here rather than globally: scipy is an optional dependency.
# Note that scipy is imported in the function which calls this,
# so there shouldn't be any issue importing here.
from scipy import special
# create a reference to gammaln to use in self.eval()
self.gammaln = special.gammaln
def bins(self, M):
"""Return the bin edges given M number of bins."""
return np.linspace(self.data[0], self.data[-1], int(M) + 1)
def __call__(self, M):
return self.eval(M)
def eval(self, M):
"""Evaluate the Knuth function.
Parameters
----------
M : int
Number of bins
Returns
-------
F : float
evaluation of the negative Knuth loglikelihood function:
smaller values indicate a better fit.
"""
M = int(M)
if M <= 0:
return np.inf
bins = self.bins(M)
nk, bins = np.histogram(self.data, bins)
return -(
self.n * np.log(M)
+ self.gammaln(0.5 * M)
- M * self.gammaln(0.5)
- self.gammaln(self.n + 0.5 * M)
+ np.sum(self.gammaln(nk + 0.5))
)
|
f4a2b46309b514f5190f111ea51644c4bd3a17164db56eb14f256a3afc3a64b3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__all__ = ["jackknife_resampling", "jackknife_stats"]
__doctest_requires__ = {"jackknife_stats": ["scipy"]}
def jackknife_resampling(data):
"""Performs jackknife resampling on numpy arrays.
Jackknife resampling is a technique to generate 'n' deterministic samples
of size 'n-1' from a measured sample of size 'n'. Basically, the i-th
sample, (1<=i<=n), is generated by means of removing the i-th measurement
of the original sample. Like the bootstrap resampling, this statistical
technique finds applications in estimating variance, bias, and confidence
intervals.
Parameters
----------
data : ndarray
Original sample (1-D array) from which the jackknife resamples will be
generated.
Returns
-------
resamples : ndarray
The i-th row is the i-th jackknife sample, i.e., the original sample
with the i-th measurement deleted.
References
----------
.. [1] McIntosh, Avery. "The Jackknife Estimation Method".
<https://arxiv.org/abs/1606.00497>
.. [2] Efron, Bradley. "The Jackknife, the Bootstrap, and other
Resampling Plans". Technical Report No. 63, Division of Biostatistics,
Stanford University, December, 1980.
.. [3] Jackknife resampling <https://en.wikipedia.org/wiki/Jackknife_resampling>
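Examples
--------
A minimal sketch; each of the ``n`` resamples drops exactly one of the
``n`` measurements:

>>> import numpy as np
>>> from astropy.stats import jackknife_resampling
>>> data = np.array([1.0, 2.0, 3.0, 4.0])
>>> resamples = jackknife_resampling(data)
>>> resamples.shape
(4, 3)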
"""
n = data.shape[0]
if n <= 0:
raise ValueError("data must contain at least one measurement.")
resamples = np.empty([n, n - 1])
for i in range(n):
resamples[i] = np.delete(data, i)
return resamples
def jackknife_stats(data, statistic, confidence_level=0.95):
"""Performs jackknife estimation on the basis of jackknife resamples.
This function requires `SciPy <https://www.scipy.org/>`_ to be installed.
Parameters
----------
data : ndarray
Original sample (1-D array).
statistic : function
Any function (or vector of functions) on the basis of the measured
data, e.g., sample mean, sample variance, etc. The jackknife estimate of
this statistic will be returned.
confidence_level : float, optional
Confidence level for the confidence interval of the Jackknife estimate.
Must be a real-valued number in (0,1). Default value is 0.95.
Returns
-------
estimate : float or `~numpy.ndarray`
The i-th element is the bias-corrected "jackknifed" estimate.
bias : float or `~numpy.ndarray`
The i-th element is the jackknife bias.
std_err : float or `~numpy.ndarray`
The i-th element is the jackknife standard error.
conf_interval : ndarray
If ``statistic`` is single-valued, the first and second elements are
the lower and upper bounds, respectively. If ``statistic`` is
vector-valued, each column corresponds to the confidence interval for
each component of ``statistic``. The first and second rows contain the
lower and upper bounds, respectively.
Examples
--------
1. Obtain Jackknife resamples:
>>> import numpy as np
>>> from astropy.stats import jackknife_resampling
>>> from astropy.stats import jackknife_stats
>>> data = np.array([1,2,3,4,5,6,7,8,9,0])
>>> resamples = jackknife_resampling(data)
>>> resamples
array([[2., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 9.]])
>>> resamples.shape
(10, 9)
2. Obtain Jackknife estimate for the mean, its bias, its standard error,
and its 95% confidence interval:
>>> test_statistic = np.mean
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
4.5
>>> bias
0.0
>>> stderr # doctest: +FLOAT_CMP
0.95742710775633832
>>> conf_interval
array([2.62347735, 6.37652265])
3. Example for two estimates
>>> test_statistic = lambda x: (np.mean(x), np.var(x))
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
array([4.5 , 9.16666667])
>>> bias
array([ 0. , -0.91666667])
>>> stderr
array([0.95742711, 2.69124476])
>>> conf_interval
array([[ 2.62347735, 3.89192387],
[ 6.37652265, 14.44140947]])
IMPORTANT: Note that confidence intervals are given as columns
"""
# jackknife confidence interval
if not (0 < confidence_level < 1):
raise ValueError("confidence level must be in (0, 1).")
# make sure original data is proper
n = data.shape[0]
if n <= 0:
raise ValueError("data must contain at least one measurement.")
# Only import scipy if inputs are valid
from scipy.special import erfinv
resamples = jackknife_resampling(data)
stat_data = statistic(data)
jack_stat = np.apply_along_axis(statistic, 1, resamples)
mean_jack_stat = np.mean(jack_stat, axis=0)
# jackknife bias
bias = (n - 1) * (mean_jack_stat - stat_data)
# jackknife standard error
std_err = np.sqrt(
(n - 1)
* np.mean((jack_stat - mean_jack_stat) * (jack_stat - mean_jack_stat), axis=0)
)
# bias-corrected "jackknifed estimate"
estimate = stat_data - bias
z_score = np.sqrt(2.0) * erfinv(confidence_level)
conf_interval = estimate + z_score * np.array((-std_err, std_err))
return estimate, bias, std_err, conf_interval
|
fc8a08c24d795ce73a9f1dc3951f6349108fb28216255fb36257ff88bfaa1270 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Indexing for Table columns.
The Index class can use several implementations as its
engine. Any implementation should implement the following:
__init__(data, row_index) : initialize index based on key/row list pairs
add(key, row) -> None : add (key, row) to existing data
remove(key, data=None) -> boolean : remove data from self[key], or all of
self[key] if data is None
shift_left(row) -> None : decrement row numbers after row
shift_right(row) -> None : increase row numbers >= row
find(key) -> list : list of rows corresponding to key
range(lower, upper, bounds) -> list : rows in self[k] where k is between
lower and upper (<= or < based on bounds)
sort() -> None : make row order align with key order
sorted_data() -> list of rows in sorted order (by key)
replace_rows(row_map) -> None : replace row numbers based on slice
items() -> list of tuples of the form (key, data)
Notes
-----
When a Table is initialized from another Table, indices are
(deep) copied and their columns are set to the columns of the new Table.
Column creation:
Column(c) -> deep copy of indices
c[[1, 2]] -> deep copy and reordering of indices
c[1:2] -> reference
array.view(Column) -> no indices
"""
from copy import deepcopy
import numpy as np
from .bst import MaxValue, MinValue
from .sorted_array import SortedArray
class QueryError(ValueError):
"""
Indicates that a given index cannot handle the supplied query.
"""
pass
class Index:
"""
The Index class makes it possible to maintain indices
on columns of a Table, so that column values can be queried
quickly and efficiently. Column values are stored in lexicographic
sorted order, which allows for binary searching in O(log n).
Parameters
----------
columns : list or None
List of columns on which to create an index. If None,
create an empty index for purposes of deep copying.
engine : type, instance, or None
Indexing engine class to use (from among SortedArray, BST,
and SCEngine) or actual engine instance.
If the supplied argument is None (by default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
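Examples
--------
Users normally do not construct an ``Index`` directly; a minimal sketch of
the usual entry point, the ``Table.add_index`` method defined elsewhere in
this package, is:

>>> from astropy.table import Table
>>> t = Table({'a': [2, 1, 3], 'b': [4.0, 5.0, 6.0]})
>>> t.add_index('a')
>>> row = t.loc[1]  # fast lookup of the row with a == 1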
"""
def __init__(self, columns, engine=None, unique=False):
# Local imports to avoid import problems.
from astropy.time import Time
from .table import Column, Table
if columns is not None:
columns = list(columns)
if engine is not None and not isinstance(engine, type):
# create from data
self.engine = engine.__class__
self.data = engine
self.columns = columns
return
# by default, use SortedArray
self.engine = engine or SortedArray
if columns is None: # this creates a special exception for deep copying
columns = []
data = []
row_index = []
elif len(columns) == 0:
raise ValueError("Cannot create index without at least one column")
elif len(columns) == 1:
col = columns[0]
row_index = Column(col.argsort())
data = Table([col[row_index]])
else:
num_rows = len(columns[0])
# replace Time columns with approximate form and remainder
new_columns = []
for col in columns:
if isinstance(col, Time):
new_columns.append(col.jd)
remainder = col - col.__class__(
col.jd, format="jd", scale=col.scale
)
new_columns.append(remainder.jd)
else:
new_columns.append(col)
# sort the table lexicographically and keep row numbers
table = Table(columns + [np.arange(num_rows)], copy_indices=False)
sort_columns = new_columns[::-1]
try:
lines = table[np.lexsort(sort_columns)]
except TypeError: # arbitrary mixins might not work with lexsort
lines = table[table.argsort()]
data = lines[lines.colnames[:-1]]
row_index = lines[lines.colnames[-1]]
self.data = self.engine(data, row_index, unique=unique)
self.columns = columns
def __len__(self):
"""
Number of rows in index.
"""
return len(self.columns[0])
def replace_col(self, prev_col, new_col):
"""
Replace an indexed column with an updated reference.
Parameters
----------
prev_col : Column
Column reference to replace
new_col : Column
New column reference
"""
self.columns[self.col_position(prev_col.info.name)] = new_col
def reload(self):
"""
Recreate the index based on data in self.columns.
"""
self.__init__(self.columns, engine=self.engine)
def col_position(self, col_name):
"""
Return the position of col_name in self.columns.
Parameters
----------
col_name : str
Name of column to look up
"""
for i, c in enumerate(self.columns):
if c.info.name == col_name:
return i
raise ValueError(f"Column does not belong to index: {col_name}")
def insert_row(self, pos, vals, columns):
"""
Insert a new row from the given values.
Parameters
----------
pos : int
Position at which to insert row
vals : list or tuple
List of values to insert into a new row
columns : list
Table column references
"""
key = [None] * len(self.columns)
for i, col in enumerate(columns):
try:
key[self.col_position(col.info.name)] = vals[i]
except ValueError: # not a member of index
continue
num_rows = len(self.columns[0])
if pos < num_rows:
# shift all rows >= pos to the right
self.data.shift_right(pos)
self.data.add(tuple(key), pos)
def get_row_specifier(self, row_specifier):
"""
Return an iterable corresponding to the
input row specifier.
Parameters
----------
row_specifier : int, list, ndarray, or slice
"""
if isinstance(row_specifier, (int, np.integer)):
# single row
return (row_specifier,)
elif isinstance(row_specifier, (list, np.ndarray)):
return row_specifier
elif isinstance(row_specifier, slice):
col_len = len(self.columns[0])
return range(*row_specifier.indices(col_len))
raise ValueError(
"Expected int, array of ints, or slice but got {} in remove_rows".format(
row_specifier
)
)
def remove_rows(self, row_specifier):
"""
Remove the given rows from the index.
Parameters
----------
row_specifier : int, list, ndarray, or slice
Indicates which row(s) to remove
"""
rows = []
# To maintain the correct row order, we loop twice,
# deleting rows first and then reordering the remaining rows
for row in self.get_row_specifier(row_specifier):
self.remove_row(row, reorder=False)
rows.append(row)
# second pass - row order is reversed to maintain
# correct row numbers
for row in reversed(sorted(rows)):
self.data.shift_left(row)
def remove_row(self, row, reorder=True):
"""
Remove the given row from the index.
Parameters
----------
row : int
Position of row to remove
reorder : bool
Whether to reorder indices after removal
"""
# for removal, form a key consisting of column values in this row
if not self.data.remove(tuple(col[row] for col in self.columns), row):
raise ValueError(f"Could not remove row {row} from index")
# decrement the row number of all later rows
if reorder:
self.data.shift_left(row)
def find(self, key):
"""
Return the row values corresponding to key, in sorted order.
Parameters
----------
key : tuple
Values to search for in each column
"""
return self.data.find(key)
def same_prefix(self, key):
"""
Return rows whose keys contain the supplied key as a prefix.
Parameters
----------
key : tuple
Prefix for which to search
"""
return self.same_prefix_range(key, key, (True, True))
def same_prefix_range(self, lower, upper, bounds=(True, True)):
"""
Return rows whose keys have a prefix in the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
"""
n = len(lower)
ncols = len(self.columns)
a = MinValue() if bounds[0] else MaxValue()
b = MaxValue() if bounds[1] else MinValue()
# [x, y] search corresponds to [(x, min), (y, max)]
# (x, y) search corresponds to ((x, max), (y, min))
lower = lower + tuple((ncols - n) * [a])
upper = upper + tuple((ncols - n) * [b])
return self.data.range(lower, upper, bounds)
def range(self, lower, upper, bounds=(True, True)):
"""
Return rows within the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
"""
return self.data.range(lower, upper, bounds)
def replace(self, row, col_name, val):
"""
Replace the value of a column at a given position.
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
"""
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
"""
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
col_slice : list
Indices to slice
"""
row_map = {row: i for i, row in enumerate(col_slice)}
self.data.replace_rows(row_map)
def sort(self):
"""
Make row numbers follow the same sort order as the keys
of the index.
"""
self.data.sort()
def sorted_data(self):
"""
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
"""
return self.data.sorted_data()
def __getitem__(self, item):
"""
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
"""
return SlicedIndex(self, item)
def __repr__(self):
col_names = tuple(col.info.name for col in self.columns)
return f"<{self.__class__.__name__} columns={col_names} data={self.data}>"
def __deepcopy__(self, memo):
"""
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
"""
# Bypass Index.__new__ to create an actual Index, not a SlicedIndex.
index = super().__new__(self.__class__)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
class SlicedIndex:
"""
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : tuple, slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
"""
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
elif isinstance(index_slice, slice): # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
else:
raise TypeError("index_slice must be tuple or slice")
@property
def length(self):
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
"""
The stopping position of the slice, or the end of the
index if this is an original slice.
"""
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
"""
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
"""
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
"""
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
"""
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
"""
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
"""
return row if self.original else self.start + row * self.step
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def get_index_or_copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.get_index_or_copy().insert_row(self.orig_coords(pos), vals, columns)
def get_row_specifier(self, row_specifier):
return [
self.orig_coords(x) for x in self.index.get_row_specifier(row_specifier)
]
def remove_rows(self, row_specifier):
if not self._frozen:
self.get_index_or_copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.get_index_or_copy().sort()
def __repr__(self):
slice_str = (
"" if self.original else f" slice={self.start}:{self.stop}:{self.step}"
)
return (
f"<{self.__class__.__name__} original={self.original}{slice_str}"
f" index={self.index}>"
)
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
"""
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
"""
from .table import Table
if len(self.columns) == 1:
index = Index([col_slice], engine=self.data.__class__)
return self.__class__(index, slice(0, 0, None), original=True)
t = Table(self.columns, copy_indices=False)
with t.index_mode("discard_on_copy"):
new_cols = t[item].columns.values()
index = Index(new_cols, engine=self.data.__class__)
return self.__class__(index, slice(0, 0, None), original=True)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
def get_index(table, table_copy=None, names=None):
"""
Return the index of ``table`` corresponding to a subset of its columns.
The subset is specified either as a sub-table ``table_copy`` or as a
list/tuple of column ``names``; None is returned if no such index exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`, optional
Subset of the columns in the ``table`` argument
names : list, tuple, optional
Subset of column names in the ``table`` argument
Returns
-------
Index of columns or None
"""
if names is not None and table_copy is not None:
raise ValueError(
'one and only one argument from "table_copy" or "names" is required'
)
if names is None and table_copy is None:
raise ValueError(
'one and only one argument from "table_copy" or "names" is required'
)
if names is not None:
names = set(names)
else:
names = set(table_copy.colnames)
if not names <= set(table.colnames):
raise ValueError(f"{names} is not a subset of table columns")
for name in names:
for index in table[name].info.indices:
if {col.info.name for col in index.columns} == names:
return index
return None
def get_index_by_names(table, names):
"""
Returns an index in ``table`` corresponding to the ``names`` columns or None
if no such index exists.
Parameters
----------
table : `Table`
Input table
names : tuple, list
Column names
"""
names = list(names)
for index in table.indices:
index_names = [col.info.name for col in index.columns]
if index_names == names:
return index
else:
return None
class _IndexModeContext:
"""
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
"""
_col_subclasses = {}
def __init__(self, table, mode):
"""
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
"""
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ("freeze", "discard_on_copy", "copy_on_getitem"):
raise ValueError(
"Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{}'".format(mode)
)
def __enter__(self):
if self.mode == "discard_on_copy":
self.table._copy_indices = False
elif self.mode == "copy_on_getitem":
for col in self.table.columns.values():
self._orig_classes.append(col.__class__)
col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
else:
for index in self.table.indices:
index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == "discard_on_copy":
self.table._copy_indices = True
elif self.mode == "copy_on_getitem":
for col in reversed(self.table.columns.values()):
col.__class__ = self._orig_classes.pop()
else:
for index in self.table.indices:
index._frozen = False
index.reload()
def _get_copy_on_getitem_shim(self, cls):
"""
This creates a subclass of the column's class which overrides that
class's ``__getitem__``, such that when returning a slice of the
column, the relevant indices are also copied over to the slice.
Ideally, rather than shimming in a new ``__class__`` we would be able
to just flip a flag that is checked by the base class's
``__getitem__``. Unfortunately, since the flag needs to be a Python
variable, this slows down ``__getitem__`` too much in the more common
case where a copy of the indices is not needed. See the docstring for
``astropy.table._column_mixins`` for more information on that.
"""
if cls in self._col_subclasses:
return self._col_subclasses[cls]
def __getitem__(self, item):
value = cls.__getitem__(self, item)
if type(value) is type(self):
value = self.info.slice_indices(value, item, len(self))
return value
clsname = f"_{cls.__name__}WithIndexCopy"
new_cls = type(str(clsname), (cls,), {"__getitem__": __getitem__})
self._col_subclasses[cls] = new_cls
return new_cls
class TableIndices(list):
"""
A special list of table indices allowing
for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
"""
def __init__(self, lst):
super().__init__(lst)
def __getitem__(self, item):
"""
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
"""
if isinstance(item, str):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError(f"No index found for {item}")
return super().__getitem__(item)
class TableLoc:
"""
A pseudo-list of Table rows allowing for retrieval
of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
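Examples
--------
A brief sketch; ``TableLoc`` is normally reached through the ``Table.loc``
attribute of a table that already has an index (the first index added
becomes the table's primary key):

>>> from astropy.table import Table
>>> t = Table({'id': [10, 20, 30], 'flux': [1.0, 2.0, 3.0]})
>>> t.add_index('id')
>>> subset = t.loc[[10, 30]]
>>> len(subset)
2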
"""
def __init__(self, table):
self.table = table
self.indices = table.indices
if len(self.indices) == 0:
raise ValueError("Cannot create TableLoc object with no indices")
def _get_rows(self, item):
"""
Retrieve the indices of Table rows matching the given key value(s) or value slice.
"""
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
if len(index.columns) > 1:
raise ValueError("Cannot use .loc on multi-column indices")
if isinstance(item, slice):
# None signifies no upper/lower bound
start = MinValue() if item.start is None else item.start
stop = MaxValue() if item.stop is None else item.stop
rows = index.range((start,), (stop,))
else:
if not isinstance(item, (list, np.ndarray)): # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for key in item:
p = index.find((key,))
if len(p) == 0:
raise KeyError(f"No matches found for key {key}")
else:
rows.extend(p)
return rows
def __getitem__(self, item):
"""
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError(f"No matches found for key {item}")
elif len(rows) == 1: # single row
return self.table[rows[0]]
return self.table[rows]
def __setitem__(self, key, value):
"""
Assign Table rows by value slice.
Parameters
----------
key : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
value : New values of the row elements.
Can be a list of tuples/lists to update the row.
"""
rows = self._get_rows(key)
if len(rows) == 0: # no matches found
raise KeyError(f"No matches found for key {key}")
elif len(rows) == 1: # single row
self.table[rows[0]] = value
else: # multiple rows
if len(rows) == len(value):
for row, val in zip(rows, value):
self.table[row] = val
else:
raise ValueError(f"Right side should contain {len(rows)} values")
class TableLocIndices(TableLoc):
def __getitem__(self, item):
"""
Retrieve Table row indices by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError(f"No matches found for key {item}")
elif len(rows) == 1: # single row
return rows[0]
return rows
class TableILoc(TableLoc):
"""
A variant of TableLoc allowing for row retrieval by
indexed order rather than data values.
Parameters
----------
table : Table
Indexed table to use
"""
def __init__(self, table):
super().__init__(table)
def __getitem__(self, item):
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
rows = index.sorted_data()[item]
table_slice = self.table[rows]
if len(table_slice) == 0: # no matches found
raise IndexError(f"Invalid index for iloc: {item}")
return table_slice
|
184cdbe0abfd377bddc46d366818c6339cf760369e224d105e38d3aeea1cd36f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
from collections import OrderedDict
from operator import index as operator_index
import numpy as np
class Row:
"""A class to represent one row of a Table object.
A Row object is returned when a Table object is indexed with an integer
or when iterating over a table::
>>> from astropy.table import Table
>>> table = Table([(1, 2), (3, 4)], names=('a', 'b'),
... dtype=('int32', 'int32'))
>>> row = table[1]
>>> row
<Row index=1>
a b
int32 int32
----- -----
2 4
>>> row['a']
2
>>> row[1]
4
"""
def __init__(self, table, index):
# Ensure that the row index is a valid index (int)
index = operator_index(index)
n = len(table)
if index < -n or index >= n:
raise IndexError(
f"index {index} out of range for table with length {len(table)}"
)
# Finally, ensure the index is positive [#8422] and set Row attributes
self._index = index % n
self._table = table
def __getitem__(self, item):
try:
# Try the most common use case of accessing a single column in the Row.
# Bypass the TableColumns __getitem__ since that does more testing
# and allows a list of tuple or str, which is not the right thing here.
out = OrderedDict.__getitem__(self._table.columns, item)[self._index]
except (KeyError, TypeError):
if self._table._is_list_or_tuple_of_str(item):
cols = [self._table[name] for name in item]
out = self._table.__class__(cols, copy=False)[self._index]
else:
# This is only to raise an exception
out = self._table.columns[item][self._index]
return out
def __setitem__(self, item, val):
if self._table._is_list_or_tuple_of_str(item):
self._table._set_row(self._index, colnames=item, vals=val)
else:
self._table.columns[item][self._index] = val
def _ipython_key_completions_(self):
return self.colnames
def __eq__(self, other):
if self._table.masked:
# Sent bug report to numpy-discussion group on 2012-Oct-21, subject:
# "Comparing rows in a structured masked array raises exception"
# No response, so this is still unresolved.
raise ValueError(
"Unable to compare rows for masked table due to numpy.ma bug"
)
return self.as_void() == other
def __ne__(self, other):
if self._table.masked:
raise ValueError(
"Unable to compare rows for masked table due to numpy.ma bug"
)
return self.as_void() != other
def __array__(self, dtype=None):
"""Support converting Row to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
If the parent table is masked then the mask information is dropped.
"""
if dtype is not None:
raise ValueError("Datatype coercion is not allowed")
return np.asarray(self.as_void())
def __len__(self):
return len(self._table.columns)
def __iter__(self):
index = self._index
for col in self._table.columns.values():
yield col[index]
def keys(self):
return self._table.columns.keys()
def values(self):
return self.__iter__()
@property
def table(self):
return self._table
@property
def index(self):
return self._index
def as_void(self):
"""
Returns a *read-only* copy of the row values in the form of np.void or
np.ma.mvoid objects. This corresponds to the object types returned for
row indexing of a pure numpy structured array or masked array. This
method is slow and its use is discouraged when possible.
Returns
-------
void_row : ``numpy.void`` or ``numpy.ma.mvoid``
Copy of row values.
``numpy.void`` if unmasked, ``numpy.ma.mvoid`` else.
"""
index = self._index
cols = self._table.columns.values()
vals = tuple(np.asarray(col)[index] for col in cols)
if self._table.masked:
mask = tuple(
col.mask[index] if hasattr(col, "mask") else False for col in cols
)
void_row = np.ma.array([vals], mask=[mask], dtype=self.dtype)[0]
else:
void_row = np.array([vals], dtype=self.dtype)[0]
return void_row
@property
def meta(self):
return self._table.meta
@property
def columns(self):
return self._table.columns
@property
def colnames(self):
return self._table.colnames
@property
def dtype(self):
return self._table.dtype
def _base_repr_(self, html=False):
"""
Display row as a single-line table but with appropriate header line.
"""
index = self.index if (self.index >= 0) else self.index + len(self._table)
table = self._table[index : index + 1]
descr_vals = [self.__class__.__name__, f"index={self.index}"]
if table.masked:
descr_vals.append("masked=True")
return table._base_repr_(
html, descr_vals, max_width=-1, tableid=f"table{id(self._table)}"
)
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
index = self.index if (self.index >= 0) else self.index + len(self._table)
return "\n".join(self.table[index : index + 1].pformat(max_width=-1))
def __bytes__(self):
return str(self).encode("utf-8")
collections.abc.Sequence.register(Row)
|
9d4e315823d4f2b24fb272eded94add4c9d05429a3d92f25a4bc4f88f4983ed1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import platform
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from .index import get_index_by_names
__all__ = ["TableGroups", "ColumnGroups"]
def table_group_by(table, keys):
# index copies are unnecessary and slow down _table_group_by
with table.index_mode("discard_on_copy"):
return _table_group_by(table, keys)
def _table_group_by(table, keys):
"""
Get groups for ``table`` on specified ``keys``.
Parameters
----------
table : `Table`
Table to group
keys : str, list of str, `Table`, or Numpy array
Grouping key specifier
Returns
-------
grouped_table : Table object with groups attr set accordingly
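Examples
--------
A minimal sketch of the user-level entry point, ``Table.group_by``, which
uses the grouping machinery in this module:

>>> from astropy.table import Table
>>> t = Table({'key': ['a', 'b', 'a'], 'val': [1, 2, 3]})
>>> tg = t.group_by('key')
>>> len(tg.groups)
2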
"""
from .serialize import represent_mixins_as_columns
from .table import Table
# Pre-convert string to tuple of strings, or Table to the underlying structured array
if isinstance(keys, str):
keys = (keys,)
if isinstance(keys, (list, tuple)):
for name in keys:
if name not in table.colnames:
raise ValueError(f"Table does not have key column {name!r}")
if table.masked and np.any(table[name].mask):
raise ValueError(
f"Missing values in key column {name!r} are not allowed"
)
# Make a column slice of the table without copying
table_keys = table.__class__([table[key] for key in keys], copy=False)
# If available get a pre-existing index for these columns
table_index = get_index_by_names(table, keys)
grouped_by_table_cols = True
elif isinstance(keys, (np.ndarray, Table)):
table_keys = keys
if len(table_keys) != len(table):
raise ValueError(
"Input keys array length {} does not match table length {}".format(
len(table_keys), len(table)
)
)
table_index = None
grouped_by_table_cols = False
else:
raise TypeError(
"Keys input must be string, list, tuple, Table or numpy array, but got {}".format(
type(keys)
)
)
# If there is not already an available index and table_keys is a Table then ensure
# that all cols (including mixins) are in a form that can be sorted with the code below.
if not table_index and isinstance(table_keys, Table):
table_keys = represent_mixins_as_columns(table_keys)
# Get the argsort index `idx_sort`, accounting for particulars
try:
# take advantage of index internal sort if possible
if table_index is not None:
idx_sort = table_index.sorted_data()
else:
idx_sort = table_keys.argsort(kind="mergesort")
stable_sort = True
except TypeError:
# Some versions (likely 1.6 and earlier) of numpy don't support
# 'mergesort' for all data types. MacOSX (Darwin) doesn't have a stable
# sort by default, nor does Windows, while Linux does (or appears to).
idx_sort = table_keys.argsort()
stable_sort = platform.system() not in ("Darwin", "Windows")
# Finally do the actual sort of table_keys values
table_keys = table_keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], table_keys[1:] != table_keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# If the sort is not stable (preserves original table order) then sort idx_sort in
# place within each group.
if not stable_sort:
for i0, i1 in zip(indices[:-1], indices[1:]):
idx_sort[i0:i1].sort()
# Make a new table and set the _groups to the appropriate TableGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = table.__class__(table[idx_sort])
out_keys = table_keys[indices[:-1]]
if isinstance(out_keys, Table):
out_keys.meta["grouped_by_table_cols"] = grouped_by_table_cols
out._groups = TableGroups(out, indices=indices, keys=out_keys)
return out
def column_group_by(column, keys):
"""
Get groups for ``column`` on specified ``keys``.
Parameters
----------
column : Column object
Column to group
keys : Table or Numpy array of same length as col
Grouping key specifier
Returns
-------
grouped_column : Column object with groups attr set accordingly
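Examples
--------
A minimal sketch using this module-level helper directly (it is internal;
user code typically goes through ``Column.group_by``):

>>> import numpy as np
>>> from astropy.table import Column
>>> from astropy.table.groups import column_group_by
>>> col = Column([1, 2, 3, 4])
>>> keys = np.array(['a', 'b', 'a', 'b'])
>>> grouped = column_group_by(col, keys)
>>> len(grouped.groups)
2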
"""
from .serialize import represent_mixins_as_columns
from .table import Table
if isinstance(keys, Table):
keys = represent_mixins_as_columns(keys)
keys = keys.as_array()
if not isinstance(keys, np.ndarray):
raise TypeError(f"Keys input must be numpy array, but got {type(keys)}")
if len(keys) != len(column):
raise ValueError(
"Input keys array length {} does not match column length {}".format(
len(keys), len(column)
)
)
idx_sort = keys.argsort()
keys = keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], keys[1:] != keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# Make a new column and set the _groups to the appropriate ColumnGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = column.__class__(column[idx_sort])
out._groups = ColumnGroups(out, indices=indices, keys=keys[indices[:-1]])
return out
class BaseGroups:
"""
A class to represent groups within a table of heterogeneous data.
- ``keys``: key values corresponding to each group
- ``indices``: index values in parent table or column corresponding to group boundaries
- ``aggregate()``: method to create new table by aggregating within groups
"""
@property
def parent(self):
return (
self.parent_column if isinstance(self, ColumnGroups) else self.parent_table
)
def __iter__(self):
self._iter_index = 0
return self
def next(self):
ii = self._iter_index
if ii < len(self.indices) - 1:
i0, i1 = self.indices[ii], self.indices[ii + 1]
self._iter_index += 1
return self.parent[i0:i1]
else:
raise StopIteration
__next__ = next
def __getitem__(self, item):
parent = self.parent
if isinstance(item, (int, np.integer)):
i0, i1 = self.indices[item], self.indices[item + 1]
out = parent[i0:i1]
out.groups._keys = parent.groups.keys[item]
else:
indices0, indices1 = self.indices[:-1], self.indices[1:]
try:
i0s, i1s = indices0[item], indices1[item]
except Exception as err:
raise TypeError(
"Index item for groups attribute must be a slice, "
"numpy mask or int array"
) from err
mask = np.zeros(len(parent), dtype=bool)
# Is there a way to vectorize this in numpy?
for i0, i1 in zip(i0s, i1s):
mask[i0:i1] = True
out = parent[mask]
out.groups._keys = parent.groups.keys[item]
out.groups._indices = np.concatenate([[0], np.cumsum(i1s - i0s)])
return out
def __repr__(self):
return f"<{self.__class__.__name__} indices={self.indices}>"
def __len__(self):
return len(self.indices) - 1
class ColumnGroups(BaseGroups):
def __init__(self, parent_column, indices=None, keys=None):
self.parent_column = parent_column # parent Column
self.parent_table = parent_column.info.parent_table
self._indices = indices
self._keys = keys
@property
def indices(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.indices
else:
if self._indices is None:
return np.array([0, len(self.parent_column)])
else:
return self._indices
@property
def keys(self):
        # If the parent column is in a table then use group keys from the table
if self.parent_table:
return self.parent_table.groups.keys
else:
return self._keys
def aggregate(self, func):
from .column import MaskedColumn
i0s, i1s = self.indices[:-1], self.indices[1:]
par_col = self.parent_column
masked = isinstance(par_col, MaskedColumn)
reduceat = hasattr(func, "reduceat")
sum_case = func is np.sum
mean_case = func is np.mean
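        # Fast path: for an unmasked column and a reducing ufunc (or np.sum /
        # np.mean, which map onto np.add.reduceat), reduce each group in one
        # vectorized call instead of looping over the groups in Python.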
try:
if not masked and (reduceat or sum_case or mean_case):
if mean_case:
vals = np.add.reduceat(par_col, i0s) / np.diff(self.indices)
else:
if sum_case:
func = np.add
vals = func.reduceat(par_col, i0s)
else:
vals = np.array([func(par_col[i0:i1]) for i0, i1 in zip(i0s, i1s)])
out = par_col.__class__(vals)
except Exception as err:
raise TypeError(
"Cannot aggregate column '{}' with type '{}': {}".format(
par_col.info.name, par_col.info.dtype, err
)
) from err
out_info = out.info
for attr in ("name", "unit", "format", "description", "meta"):
try:
setattr(out_info, attr, getattr(par_col.info, attr))
except AttributeError:
pass
return out
def filter(self, func):
"""
Filter groups in the Column based on evaluating function ``func`` on each
        group sub-column.
The function which is passed to this method must accept one argument:
- ``column`` : `Column` object
It must then return either `True` or `False`. As an example, the following
will select all column groups with only positive values::
def all_positive(column):
if np.any(column < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
out : Column
            New column with only the rows from groups that satisfy ``func``.
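
        For example (a sketch; the column and key values are hypothetical)::

            cg = col.group_by(np.array([1, 1, 2, 2]))
            positive = cg.groups.filter(lambda c: np.all(c > 0))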
"""
mask = np.empty(len(self), dtype=bool)
for i, group_column in enumerate(self):
mask[i] = func(group_column)
return self[mask]
class TableGroups(BaseGroups):
def __init__(self, parent_table, indices=None, keys=None):
self.parent_table = parent_table # parent Table
self._indices = indices
self._keys = keys
@property
def key_colnames(self):
"""
Return the names of columns in the parent table that were used for grouping.
"""
# If the table was grouped by key columns *in* the table then treat those columns
# differently in aggregation. In this case keys will be a Table with
# keys.meta['grouped_by_table_cols'] == True. Keys might not be a Table so we
# need to handle this.
grouped_by_table_cols = getattr(self.keys, "meta", {}).get(
"grouped_by_table_cols", False
)
return self.keys.colnames if grouped_by_table_cols else ()
@property
def indices(self):
if self._indices is None:
return np.array([0, len(self.parent_table)])
else:
return self._indices
def aggregate(self, func):
"""
Aggregate each group in the Table into a single row by applying the reduction
function ``func`` to group values in each column.
Parameters
----------
func : function
Function that reduces an array of values to a single value
Returns
-------
out : Table
New table with the aggregated rows.
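
        For example (a sketch, assuming ``t`` is a table with a key column
        ``'a'``)::

            tg = t.group_by('a')
            sums = tg.groups.aggregate(np.sum)   # one row per group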
"""
i0s = self.indices[:-1]
out_cols = []
parent_table = self.parent_table
for col in parent_table.columns.values():
# For key columns just pick off first in each group since they are identical
if col.info.name in self.key_colnames:
new_col = col.take(i0s)
else:
try:
new_col = col.info.groups.aggregate(func)
except TypeError as err:
warnings.warn(str(err), AstropyUserWarning)
continue
out_cols.append(new_col)
return parent_table.__class__(out_cols, meta=parent_table.meta)
def filter(self, func):
"""
Filter groups in the Table based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept two arguments:
- ``table`` : `Table` object
- ``key_colnames`` : tuple of column names in ``table`` used as keys for grouping
It must then return either `True` or `False`. As an example, the following
will select all table groups with only positive values in the non-key columns::
def all_positive(table, key_colnames):
colnames = [name for name in table.colnames if name not in key_colnames]
for colname in colnames:
if np.any(table[colname] < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
out : Table
            New table with only the rows from groups that satisfy ``func``.
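
        For example (a sketch, reusing the ``all_positive`` function above on a
        table ``t`` grouped by a hypothetical key column ``'a'``)::

            tg = t.group_by('a')
            selected = tg.groups.filter(all_positive)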
"""
mask = np.empty(len(self), dtype=bool)
key_colnames = self.key_colnames
for i, group_table in enumerate(self):
mask[i] = func(group_table, key_colnames)
return self[mask]
@property
def keys(self):
return self._keys
d296025d03994204eb851e636c25564e04bb71b6c4daae6420153c274df84dec
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import sys
import types
import warnings
import weakref
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
from copy import deepcopy
import numpy as np
from numpy import ma
from astropy import log
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.units import Quantity, QuantityInfo
from astropy.utils import ShapedLikeNDArray, isiterable
from astropy.utils.console import color_print
from astropy.utils.data_info import BaseColumnInfo, DataInfo, MixinInfo
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
from astropy.utils.metadata import MetaAttribute, MetaData
from . import conf, groups
from .column import (
BaseColumn,
Column,
FalseArray,
MaskedColumn,
_auto_names,
_convert_sequence_data_to_array,
col_copy,
)
from .connect import TableRead, TableWrite
from .index import (
Index,
SlicedIndex,
TableILoc,
TableIndices,
TableLoc,
TableLocIndices,
_IndexModeContext,
get_index,
)
from .info import TableInfo
from .mixins.registry import get_mixin_handler
from .ndarray_mixin import NdarrayMixin # noqa: F401
from .pprint import TableFormatter
from .row import Row
_implementation_notes = """
This string has informal notes concerning Table implementation for developers.
Things to remember:
- Table has customizable attributes ColumnClass, Column, MaskedColumn.
Table.Column is normally just column.Column (same w/ MaskedColumn)
but in theory they can be different. Table.ColumnClass is the default
class used to create new non-mixin columns, and this is a function of
the Table.masked attribute. Column creation / manipulation in a Table
needs to respect these.
- Column objects that get inserted into the Table.columns attribute must
have the info.parent_table attribute set correctly. Beware just dropping
an object into the columns dict since an existing column may
be part of another Table and have parent_table set to point at that
table. Dropping that column into `columns` of this Table will cause
a problem for the old one so the column object needs to be copied (but
not necessarily the data).
Currently replace_column is always making a copy of both object and
data if parent_table is set. This could be improved but requires a
generic way to copy a mixin object but not the data.
- Be aware of column objects that have indices set.
- `cls.ColumnClass` is a property that effectively uses the `masked` attribute
to choose either `cls.Column` or `cls.MaskedColumn`.
"""
__doctest_skip__ = [
"Table.read",
"Table.write",
"Table._read",
"Table.convert_bytestring_to_unicode",
"Table.convert_unicode_to_bytestring",
]
__doctest_requires__ = {"*pandas": ["pandas>=1.1"]}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of lines in table output.
max_width : int or None
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
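    For example (illustrative), a float column named ``'a'`` whose data has
    shape ``(5, 2)`` is described as ``('a', dtype('float64'), (2,))``.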
"""
col_dtype = "O" if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, "shape") else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
"""Check if the object's info is an instance of cls."""
# We check info on the class of the instance, since on the instance
# itself accessing 'info' has side effects in that it sets
# obj.__dict__['info'] if it does not exist already.
return isinstance(getattr(obj.__class__, "info", None), cls)
def _get_names_from_list_of_dict(rows):
"""Return list of column names if ``rows`` is a list of dict that
defines table data.
If rows is not a list of dict then return None.
"""
if rows is None:
return None
names = set()
for row in rows:
if not isinstance(row, Mapping):
return None
names.update(row)
return list(names)
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
    cols : dict, list, tuple, optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return list(self.values())[item]
elif (
isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
):
return list(self.values())[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError(
f"Illegal key or index value for {type(self).__name__} object"
)
def __setitem__(self, item, value, validated=False):
"""
Set item in this dict instance, but do not allow directly replacing an
existing column unless it is already validated (and thus is certain to
not corrupt the table).
NOTE: it is easily possible to corrupt a table by directly *adding* a new
key to the TableColumns attribute of a Table, e.g.
``t.columns['jane'] = 'doe'``.
"""
if item in self and not validated:
raise ValueError(
f"Cannot replace column '{item}'. Use Table.replace_column() instead."
)
super().__setitem__(item, value)
def __repr__(self):
names = (f"'{x}'" for x in self.keys())
return f"<{self.__class__.__name__} names=({','.join(names)})>"
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError(f"Column {new_name} already exists")
# Rename column names in pprint include/exclude attributes as needed
parent_table = self[name].info.parent_table
if parent_table is not None:
parent_table.pprint_exclude_names._rename(name, new_name)
parent_table.pprint_include_names._rename(name, new_name)
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
def __delitem__(self, name):
# Remove column names from pprint include/exclude attributes as needed.
# __delitem__ also gets called for pop() and popitem().
parent_table = self[name].info.parent_table
if parent_table is not None:
# _remove() method does not require that `name` is in the attribute
parent_table.pprint_exclude_names._remove(name)
parent_table.pprint_include_names._remove(name)
return super().__delitem__(name)
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are instances of given classes.
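
        For example (a sketch), collecting all masked columns of a table ``t``::

            masked_cols = t.columns.isinstance(MaskedColumn)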
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class TableAttribute(MetaAttribute):
"""
Descriptor to define a custom attribute for a Table subclass.
The value of the ``TableAttribute`` will be stored in a dict named
``__attributes__`` that is stored in the table ``meta``. The attribute
can be accessed and set in the usual way, and it can be provided when
creating the object.
Defining an attribute by this mechanism ensures that it will persist if
the table is sliced or serialized, for example as a pickle or ECSV file.
See the `~astropy.utils.metadata.MetaAttribute` documentation for additional
details.
Parameters
----------
default : object
Default value for attribute
Examples
--------
>>> from astropy.table import Table, TableAttribute
>>> class MyTable(Table):
... identifier = TableAttribute(default=1)
>>> t = MyTable(identifier=10)
>>> t.identifier
10
>>> t.meta
OrderedDict([('__attributes__', {'identifier': 10})])
"""
class PprintIncludeExclude(TableAttribute):
"""Maintain tuple that controls table column visibility for print output.
This is a descriptor that inherits from MetaAttribute so that the attribute
value is stored in the table meta['__attributes__'].
This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table
attributes.
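
    For example (a sketch, with hypothetical column names)::

        t.pprint_include_names = ('a', 'b')   # print only columns 'a' and 'b'
        t.pprint_include_names.add('c')       # also print column 'c'
        t.pprint_include_names = None         # back to printing all columns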
"""
def __get__(self, instance, owner_cls):
"""Get the attribute.
This normally returns an instance of this class which is stored on the
owner object.
"""
# For getting from class not an instance
if instance is None:
return self
# If not already stored on `instance`, make a copy of the class
# descriptor object and put it onto the instance.
value = instance.__dict__.get(self.name)
if value is None:
value = deepcopy(self)
instance.__dict__[self.name] = value
# We set _instance_ref on every call, since if one makes copies of
# instances, this attribute will be copied as well, which will lose the
# reference.
value._instance_ref = weakref.ref(instance)
return value
def __set__(self, instance, names):
"""Set value of ``instance`` attribute to ``names``.
Parameters
----------
instance : object
Instance that owns the attribute
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
if isinstance(names, str):
names = [names]
if names is None:
# Remove attribute value from the meta['__attributes__'] dict.
# Subsequent access will just return None.
delattr(instance, self.name)
else:
# This stores names into instance.meta['__attributes__'] as tuple
return super().__set__(instance, tuple(names))
def __call__(self):
"""Get the value of the attribute.
Returns
-------
names : None, tuple
Include/exclude names
"""
# Get the value from instance.meta['__attributes__']
instance = self._instance_ref()
return super().__get__(instance, instance.__class__)
def __repr__(self):
if hasattr(self, "_instance_ref"):
out = f"<{self.__class__.__name__} name={self.name} value={self()}>"
else:
out = super().__repr__()
return out
def _add_remove_setup(self, names):
"""Common setup for add and remove.
- Coerce attribute value to a list
- Coerce names into a list
- Get the parent table instance
"""
names = [names] if isinstance(names, str) else list(names)
# Get the value. This is the same as self() but we need `instance` here.
instance = self._instance_ref()
value = super().__get__(instance, instance.__class__)
value = [] if value is None else list(value)
return instance, names, value
def add(self, names):
"""Add ``names`` to the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to add
"""
instance, names, value = self._add_remove_setup(names)
value.extend(name for name in names if name not in value)
super().__set__(instance, tuple(value))
def remove(self, names):
"""Remove ``names`` from the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to remove
"""
self._remove(names, raise_exc=True)
def _remove(self, names, raise_exc=False):
"""Remove ``names`` with optional checking if they exist."""
instance, names, value = self._add_remove_setup(names)
# Return now if there are no attributes and thus no action to be taken.
if not raise_exc and "__attributes__" not in instance.meta:
return
# Remove one by one, optionally raising an exception if name is missing.
for name in names:
if name in value:
value.remove(name) # Using the list.remove method
elif raise_exc:
raise ValueError(f"{name} not in {self.name}")
# Change to either None or a tuple for storing back to attribute
value = None if value == [] else tuple(value)
self.__set__(instance, value)
def _rename(self, name, new_name):
"""Rename ``name`` to ``new_name`` if ``name`` is in the list."""
names = self() or ()
if name in names:
new_names = list(names)
new_names[new_names.index(name)] = new_name
self.set(new_names)
def set(self, names):
"""Set value of include/exclude attribute to ``names``.
Parameters
----------
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
class _Context:
def __init__(self, descriptor_self):
self.descriptor_self = descriptor_self
self.names_orig = descriptor_self()
def __enter__(self):
pass
def __exit__(self, type, value, tb):
descriptor_self = self.descriptor_self
instance = descriptor_self._instance_ref()
descriptor_self.__set__(instance, self.names_orig)
def __repr__(self):
return repr(self.descriptor_self)
ctx = _Context(descriptor_self=self)
instance = self._instance_ref()
self.__set__(instance, names)
return ctx
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data.
A key enhancement provided by the `~astropy.table.Table` class over
e.g. a `numpy` structured array is the ability to easily modify the
structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: https://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
units : list, dict, optional
List or dict of units to apply to columns.
descriptions : list, dict, optional
List or dict of descriptions to apply to columns.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
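
    For example (a minimal, illustrative construction; the column names and
    units are hypothetical)::

        t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]},
                  names=('a', 'b'), units={'b': 'm'})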
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
pprint_exclude_names = PprintIncludeExclude()
pprint_include_names = PprintIncludeExclude()
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
        names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : array or `~numpy.ma.MaskedArray`
Copy of table as a numpy structured array.
ndarray for unmasked or `~numpy.ma.MaskedArray` for masked.
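
        For example (illustrative), extracting two columns as a structured
        array::

            arr = t.as_array(names=['a', 'b'])
            arr['a']   # plain ndarray field named 'a'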
"""
masked = self.masked or self.has_masked_columns or self.has_masked_values
empty_init = ma.empty if masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
dtype = []
cols = self.columns.values()
if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
if not (col.info.dtype.isnative or keep_byteorder):
new_dt = np.dtype(col_descr[1]).newbyteorder("=")
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
# For masked out, masked mixin columns need to set output mask attribute.
if masked and has_info_class(col, MixinInfo) and hasattr(col, "mask"):
data[col.info.name].mask = col.mask
return data
def __init__(
self,
data=None,
masked=False,
names=None,
dtype=None,
meta=None,
copy=True,
rows=None,
copy_indices=True,
units=None,
descriptions=None,
**kwargs,
):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError("Cannot specify dtype when copy=False")
# Specifies list of names found for the case of initializing table with
# a list of dict. If data are not list of dict then this is None.
names_from_list_of_dict = None
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
if rows is not None:
if data is not None:
raise ValueError("Cannot supply both `data` and `rows` values")
if isinstance(rows, types.GeneratorType):
                # Convert to a list first so the checks below do not consume the generator
rows = list(rows)
# Get column names if `rows` is a list of dict, otherwise this is None
names_from_list_of_dict = _get_names_from_list_of_dict(rows)
if names_from_list_of_dict:
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
data = list(zip(*rows))
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
# Handle custom (subclass) table attributes that are stored in meta.
# These are defined as class attributes using the TableAttribute
# descriptor. Any such attributes get removed from kwargs here and
# stored for use after the table is otherwise initialized. Any values
# provided via kwargs will have precedence over existing values from
# meta (e.g. from data as a Table or meta via kwargs).
meta_table_attrs = {}
if kwargs:
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, TableAttribute):
meta_table_attrs[attr] = kwargs.pop(attr)
if hasattr(data, "__astropy_table__"):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError(
f"__init__() got unexpected keyword argument {list(kwargs.keys())[0]!r}"
)
if isinstance(data, np.ndarray) and data.shape == (0,) and not data.dtype.names:
data = None
if isinstance(data, self.Row):
data = data._table[data._index : data._index + 1]
if isinstance(data, (list, tuple)):
# Get column names from `data` if it is a list of dict, otherwise this is None.
# This might be previously defined if `rows` was supplied as an init arg.
names_from_list_of_dict = (
names_from_list_of_dict or _get_names_from_list_of_dict(data)
)
if names_from_list_of_dict:
init_func = self._init_from_list_of_dicts
n_cols = len(names_from_list_of_dict)
else:
init_func = self._init_from_list
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError("Can not initialize a Table with a scalar")
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
# table init_func() is called. But for table input the table meta
# gets a key copy here if copy=False because later a direct object ref
# is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
# Table was initialized as `t = Table()`. Set up for empty
# table with names=[], data=[], and n_cols=0.
# self._init_from_list() will simply return, giving the
# expected empty table.
names = []
else:
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError(
"dtype was specified but could not be "
"parsed for column names"
)
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError(f"Data type {type(data)} not allowed to init Table")
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if dtype is None:
dtype = [None] * n_cols
elif isinstance(dtype, np.dtype):
if default_names is None:
default_names = dtype.names
# Convert a numpy dtype input to a list of dtypes for later use.
dtype = [dtype[name] for name in dtype.names]
if names is None:
names = default_names or [None] * n_cols
names = [None if name is None else str(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Update meta with TableAttributes supplied as kwargs in Table init.
# This takes precedence over previously-defined meta.
if meta_table_attrs:
for attr, value in meta_table_attrs.items():
setattr(self, attr, value)
# Whatever happens above, the masked property should be set to a boolean
if self.masked not in (None, True, False):
raise TypeError("masked property must be None, True or False")
self._set_column_attribute("unit", units)
self._set_column_attribute("description", descriptions)
def _set_column_attribute(self, attr, values):
"""Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column
name) or a dict of name: value pairs. This is used for handling the ``units`` and
``descriptions`` kwargs to ``__init__``.
"""
if not values:
return
if isinstance(values, Row):
# For a Row object transform to an equivalent dict.
values = {name: values[name] for name in values.colnames}
if not isinstance(values, Mapping):
# If not a dict map, assume iterable and map to dict if the right length
if len(values) != len(self.columns):
raise ValueError(
f"sequence of {attr} values must match number of columns"
)
values = dict(zip(self.colnames, values))
for name, value in values.items():
if name not in self.columns:
raise ValueError(
f"invalid column name {name} for setting {attr} attribute"
)
# Special case: ignore unit if it is an empty or blank string
if attr == "unit" and isinstance(value, str):
if value.strip() == "":
value = None
if value not in (np.ma.masked, None):
setattr(self[name].info, attr, value)
def __getstate__(self):
columns = OrderedDict(
(key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items()
)
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked or self.has_masked_columns or self.has_masked_values:
mask_table = Table(
[
getattr(col, "mask", FalseArray(col.shape))
for col in self.itercols()
],
names=self.colnames,
copy=False,
)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property.
"""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
        fill_value : object, optional
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : `~astropy.table.Table`
New table with masked values filled
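
        For example (a sketch), filling every masked entry in the table with
        zero::

            clean = t.filled(0)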
"""
if self.masked or self.has_masked_columns or self.has_masked_values:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [
col.filled(fill_value) if hasattr(col, "filled") else col
for col in self.itercols()
]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
@property
def indices(self):
"""
Return the indices associated with columns of the table
as a TableIndices object.
"""
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum(index is x for x in lst) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
"""
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
"""
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
"""
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
"""
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
"""
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, either `~astropy.table.SortedArray`,
`~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied
argument is None (by default), use `~astropy.table.SortedArray`.
unique : bool
Whether the values of the index must be unique. Default is False.
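
        For example (with an illustrative column name), index on ``'a'`` and
        then use the index for fast row lookup::

            t.add_index('a')
            rows = t.loc[3]   # rows where t['a'] == 3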
"""
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, "_supports_indexing", False):
raise ValueError(
'Cannot create an index on column "{}", of type "{}"'.format(
col.info.name, type(col)
)
)
is_primary = not self.indices
index = Index(columns, engine=engine, unique=unique)
sliced_index = SlicedIndex(index, slice(0, 0, None), original=True)
if is_primary:
self.primary_key = colnames
for col in columns:
col.info.indices.append(sliced_index)
def remove_indices(self, colname):
"""
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
"""
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
"""
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
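
        For example (a sketch), making many modifications to an indexed column
        without rebuilding the index after every change::

            with t.index_mode('freeze'):
                t['a'][:] = t['a'] + 1
            # the index refreshes itself here, when the context exits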
"""
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
if np.dtype(dtype) != object:
raise ValueError("Datatype coercion is not allowed")
out = np.array(None, dtype=object)
out[()] = self
return out
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
out = self.as_array()
return out.data if isinstance(out, np.ma.MaskedArray) else out
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, "dtype"), (names, "names")):
if not isiterable(inp_list):
raise ValueError(f"{inp_str} must be a list or None")
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns'
)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of dictionaries representing rows."""
# Define placeholder for missing values as a unique object that cannot
# every occur in user data.
MISSING = object()
# Gather column names that exist in the input `data`.
names_from_data = set()
for row in data:
names_from_data.update(row)
if set(data[0].keys()) == names_from_data:
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
# Convert list of dict into dict of list (cols), keep track of missing
# indexes and put in MISSING placeholders in the `cols` lists.
cols = {}
missing_indexes = defaultdict(list)
for name in names_from_data:
cols[name] = []
for ii, row in enumerate(data):
try:
val = row[name]
except KeyError:
missing_indexes[name].append(ii)
val = MISSING
cols[name].append(val)
# Fill the missing entries with first values
if missing_indexes:
for name, indexes in missing_indexes.items():
col = cols[name]
first_val = next(val for val in col if val is not MISSING)
for index in indexes:
col[index] = first_val
# prepare initialization
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
# Mask the missing values if necessary, converting columns to MaskedColumn
# as needed.
if missing_indexes:
for name, indexes in missing_indexes.items():
col = self[name]
# Ensure that any Column subclasses with MISSING values can support
# setting masked values. As of astropy 4.0 the test condition below is
# always True since _init_from_dict cannot result in mixin columns.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
self[name] = self.MaskedColumn(col, copy=False)
# Finally do the masking in a mixin-safe way.
self[name][indexes] = np.ma.masked
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of column data. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
# Special case of initializing an empty table like `t = Table()`. No
# action required at this point.
if n_cols == 0:
return
cols = []
default_names = _auto_names(n_cols)
for col, name, default_name, dtype in zip(data, names, default_names, dtype):
col = self._convert_data_to_col(col, copy, default_name, dtype, name)
cols.append(col)
self._init_from_cols(cols)
def _convert_data_to_col(
self, data, copy=True, default_name=None, dtype=None, name=None
):
"""
        Convert any allowed sequence ``data`` to a column object that can be used
directly in the self.columns dict. This could be a Column, MaskedColumn,
or mixin column.
        The final column name is determined by::
            name or data.info.name or default_name
        If ``data`` has no ``info`` then ``name = name or default_name``.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
data : object (column-like sequence)
Input column data
copy : bool
Make a copy
default_name : str
Default name
dtype : np.dtype or None
Data dtype
name : str or None
Column name
Returns
-------
col : Column, MaskedColumn, mixin-column type
Object that can be used as a column in self
"""
data_is_mixin = self._is_mixin_for_table(data)
masked_col_cls = (
self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn
)
try:
data0_is_mixin = self._is_mixin_for_table(data[0])
except Exception:
# Need broad exception, cannot predict what data[0] raises for arbitrary data
data0_is_mixin = False
# If the data is not an instance of Column or a mixin class, we can
# check the registry of mixin 'handlers' to see if the column can be
# converted to a mixin class
if (handler := get_mixin_handler(data)) is not None:
original_data = data
data = handler(data)
if not (data_is_mixin := self._is_mixin_for_table(data)):
fully_qualified_name = (
original_data.__class__.__module__
+ "."
+ original_data.__class__.__name__
)
raise TypeError(
"Mixin handler for object of type "
f"{fully_qualified_name} "
"did not return a valid mixin column"
)
# Get the final column name using precedence. Some objects may not
# have an info attribute. Also avoid creating info as a side effect.
if not name:
if isinstance(data, Column):
name = data.name or default_name
elif "info" in getattr(data, "__dict__", ()):
name = data.info.name or default_name
else:
name = default_name
if isinstance(data, Column):
# If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass,
# otherwise just use the original class. The most common case is a
# table with masked=True and ColumnClass=MaskedColumn. Then a Column
# gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior
# of downgrading from MaskedColumn to Column (for non-masked table)
# does not happen.
col_cls = self._get_col_cls_for_table(data)
elif data_is_mixin:
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute. If not copying, take a slice
# to ensure we get a new instance and we do not share metadata
# like info.
col = col_copy(data, copy_indices=self._init_indices) if copy else data[:]
col.info.name = name
return col
elif data0_is_mixin:
# Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m].
try:
col = data[0].__class__(data)
col.info.name = name
return col
except Exception:
# If that didn't work for some reason, just turn it into np.array of object
data = np.array(data, dtype=object)
col_cls = self.ColumnClass
elif isinstance(data, (np.ma.MaskedArray, Masked)):
# Require that col_cls be a subclass of MaskedColumn, remembering
# that ColumnClass could be a user-defined subclass (though more-likely
# could be MaskedColumn).
col_cls = masked_col_cls
elif data is None:
# Special case for data passed as the None object (for broadcasting
# to an object column). Need to turn data into numpy `None` scalar
# object, otherwise `Column` interprets data=None as no data instead
            # of an object column of `None`.
data = np.array(None)
col_cls = self.ColumnClass
elif not hasattr(data, "dtype"):
# `data` is none of the above, convert to numpy array or MaskedArray
# assuming only that it is a scalar or sequence or N-d nested
# sequence. This function is relatively intricate and tries to
# maintain performance for common cases while handling things like
# list input with embedded np.ma.masked entries. If `data` is a
# scalar then it gets returned unchanged so the original object gets
# passed to `Column` later.
data = _convert_sequence_data_to_array(data, dtype)
copy = False # Already made a copy above
col_cls = (
masked_col_cls
if isinstance(data, np.ma.MaskedArray)
else self.ColumnClass
)
else:
col_cls = self.ColumnClass
try:
col = col_cls(
name=name,
data=data,
dtype=dtype,
copy=copy,
copy_indices=self._init_indices,
)
except Exception:
# Broad exception class since we don't know what might go wrong
raise ValueError("unable to convert data to Column for Table")
col = self._convert_col_for_table(col)
return col
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array."""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = (
[data[name] for name in data_names]
if struct
else [data[:, i] for i in range(n_cols)]
)
self._init_from_list(cols, names, dtype, n_cols, copy)
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns."""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _get_col_cls_for_table(self, col):
"""Get the correct column class to use for upgrading any Column-like object.
For a masked table, ensure any Column-like object is a subclass
of the table MaskedColumn.
For unmasked table, ensure any MaskedColumn-like object is a subclass
of the table MaskedColumn. If not a MaskedColumn, then ensure that any
Column-like object is a subclass of the table Column.
"""
col_cls = col.__class__
if self.masked:
if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
else:
if isinstance(col, MaskedColumn):
if not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
elif isinstance(col, Column) and not isinstance(col, self.Column):
col_cls = self.Column
return col_cls
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct base class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col_cls = self._get_col_cls_for_table(col)
if col_cls is not col.__class__:
col = col_cls(col, copy=False)
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects."""
lengths = {len(col) for col in cols}
if len(lengths) > 1:
raise ValueError(f"Inconsistent data column lengths: {lengths}")
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(
table, newcols, verify=False, names=self.columns.keys()
)
return table
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
        # case the function is being called from a "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs. In that case verify=False.
if verify:
if None in names:
raise TypeError("Cannot have None for column name")
if len(set(names)) != len(names):
raise ValueError("Duplicate column names")
table.columns = table.TableColumns(
(name, col) for name, col in zip(names, cols)
)
for col in cols:
table._set_col_parent_table_and_mask(col)
def _set_col_parent_table_and_mask(self, col):
"""
Set ``col.parent_table = self`` and force ``col`` to have ``mask``
attribute if the table is masked and ``col.mask`` does not exist.
"""
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = self
# Legacy behavior for masked table
if self.masked and not hasattr(col, "mask"):
col.mask = FalseArray(col.shape)
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(
self,
html=False,
descr_vals=None,
max_width=None,
tableid=None,
show_dtype=True,
max_lines=None,
tableclass=None,
):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append("masked=True")
descr_vals.append(f"length={len(self)}")
descr = " ".join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = f"<i>{xml_escape(descr)}</i>\n"
else:
descr = f"<{descr}>\n"
if tableid is None:
tableid = f"table{id(self)}"
data_lines, outs = self.formatter._pformat_table(
self,
tableid=tableid,
html=html,
max_width=max_width,
show_name=True,
show_unit=None,
show_dtype=show_dtype,
max_lines=max_lines,
tableclass=tableclass,
)
out = descr + "\n".join(data_lines)
return out
def _repr_html_(self):
out = self._base_repr_(
html=True, max_width=-1, tableclass=conf.default_notebook_table_class
)
# Wrap <table> in <div>. This follows the pattern in pandas and allows
# table to be scrollable horizontally in VS Code notebook display.
out = f"<div>{out}</div>"
return out
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return "\n".join(self.pformat())
def __bytes__(self):
return str(self).encode("utf-8")
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
@property
def has_masked_columns(self):
"""True if table has any ``MaskedColumn`` columns.
This does not check for mixin columns that may have masked values, use the
``has_masked_values`` property in that case.
"""
return any(isinstance(col, MaskedColumn) for col in self.itercols())
@property
def has_masked_values(self):
"""True if column in the table has values which are masked.
This may be relatively slow for large tables as it requires checking the mask
values of each column.
"""
return any(hasattr(col, "mask") and np.any(col.mask) for col in self.itercols())
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not [Masked]Quantity (which gets converted to
# [Masked]Column with unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
@format_doc(_pprint_docs)
def pprint(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
align=None,
):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
align=align,
)
if outs["show_length"]:
lines.append(f"Length = {len(self)} rows")
n_header = outs["n_header"]
for i, line in enumerate(lines):
if i < n_header:
color_print(line, "red")
else:
print(line)
@format_doc(_pprint_docs)
def pprint_all(
self,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
align=None,
):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(
max_lines, max_width, show_name, show_unit, show_dtype, align
)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()), copy=False)
else:
return self
def show_in_notebook(
self,
tableid=None,
css=None,
display_length=50,
table_class="astropy-default",
show_row_index="idx",
):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or None
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or None
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <https://getbootstrap.com/css/#tables>`_
for the list of classes.
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
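Examples
--------
A minimal usage sketch from a notebook cell (requires IPython and online
access to the javascript libraries, so it is not run as a doctest)::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.show_in_notebook(display_length=10)  # doctest: +SKIP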
"""
from IPython.display import HTML
from .jsviewer import JSViewer
if tableid is None:
tableid = f"table{id(self)}-{np.random.randint(1, 1e6)}"
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == "astropy-default":
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(
html=True,
max_width=-1,
tableid=tableid,
max_lines=-1,
show_dtype=False,
tableclass=table_class,
)
columns = display_table.columns.values()
sortable_columns = [
i for i, col in enumerate(columns) if col.info.dtype.kind in "iufc"
]
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(
self,
max_lines=5000,
jsviewer=False,
browser="default",
jskwargs={"use_local_files": True},
tableid=None,
table_class="display compact",
css=None,
show_row_index="idx",
):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or None
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or None
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import tempfile
import webbrowser
from urllib.parse import urljoin
from urllib.request import pathname2url
from .jsviewer import DEFAULT_CSS
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, "table.html")
with open(path, "w") as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(
tmp,
format="jsviewer",
css=css,
max_lines=max_lines,
jskwargs=jskwargs,
table_id=tableid,
table_class=table_class,
)
else:
self.write(tmp, format="html")
try:
br = webbrowser.get(None if browser == "default" else browser)
except webbrowser.Error:
log.error(f"Browser '{browser}' not found.")
else:
br.open(urljoin("file:", pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
align=None,
tableclass=None,
):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
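For example, ``pformat`` returns the same lines that ``pprint`` would print
(shown here for an arbitrary two-column table)::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> lines = t.pformat()
>>> len(lines)  # header, separator line, and one line per row
4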
"""
lines, outs = self.formatter._pformat_table(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
html=html,
tableid=tableid,
tableclass=tableclass,
align=align,
)
if outs["show_length"]:
lines.append(f"Length = {len(self)} rows")
return lines
@format_doc(_pformat_docs, id="{id}")
def pformat_all(
self,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
align=None,
tableclass=None,
):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(
max_lines,
max_width,
show_name,
show_unit,
show_dtype,
html,
tableid,
align,
tableclass,
)
def more(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (
isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__(
[self[x] for x in item], copy_indices=self._copy_indices
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif (isinstance(item, np.ndarray) and item.size == 0) or (
isinstance(item, (tuple, list)) and not item
):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (
isinstance(item, (slice, np.ndarray, list))
or isinstance(item, tuple)
and all(isinstance(x, np.ndarray) for x in item)
):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError(f"Illegal type {type(item)} for table item access")
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
self.add_column(value, name=item, copy=True)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (
not getattr(self, "_setitem_inplace", False)
and not conf.replace_inplace
):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (
isinstance(item, (slice, np.ndarray, list))
or isinstance(item, tuple)
and all(isinstance(x, np.ndarray) for x in item)
):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError(
"Right side value needs {} elements (one for each column)".format(
n_cols
)
)
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError(f"Illegal type {type(item)} for table item access")
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif isinstance(item, (list, tuple, np.ndarray)) and all(
isinstance(x, str) for x in item
):
self.remove_columns(item)
elif (
isinstance(item, (list, np.ndarray)) and np.asarray(item).dtype.kind == "i"
):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError("illegal key or index value")
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception(
"Masked attribute is read-only (use t = Table(t, masked=True)"
" to convert to a masked table)"
)
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
self._column_class = self.MaskedColumn if self._masked else self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings."""
return (
isinstance(names, (tuple, list))
and names
and all(isinstance(x, str) for x in names)
)
def keys(self):
return list(self.columns.keys())
def values(self):
return self.columns.values()
def items(self):
return self.columns.items()
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
# and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError(f"Column {name} does not exist")
def add_column(
self,
col,
index=None,
name=None,
rename_duplicate=False,
copy=True,
default_name=None,
):
"""
Add a new column to the table using ``col`` as input. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
The ``col`` input can be any data object which is acceptable as a
`~astropy.table.Table` column object or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
To add several columns at once use ``add_columns()`` or simply call
``add_column()`` for each one. There is very little performance difference
between the two approaches.
Parameters
----------
col : object
Data object for the new column
index : int or None
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
default_name : str or None
Name to use if both ``name`` and ``col.info.name`` are not available.
Defaults to ``col{number_of_columns}``.
Examples
--------
Create a table with two columns 'a' and 'b', then create a third column 'c'
and append it to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> t.add_column(['a', 'b'], name='d', index=1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(1.1, name='b', rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.1
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(['a', 'b'])
>>> t.add_column(col_c, name='d')
>>> print(t)
a b col2 d
--- --- ---- ---
1 0.1 a x
2 0.2 b y
"""
if default_name is None:
default_name = f"col{len(self.columns)}"
# Convert col data to acceptable object for insertion into self.columns.
# Note that along with the lines above and below, this allows broadcasting
# of scalars to the correct shape for adding to table.
col = self._convert_data_to_col(
col, name=name, copy=copy, default_name=default_name
)
# Assigning a scalar column to an empty table should result in an
# exception (see #3811).
if col.shape == () and len(self) == 0:
raise TypeError("Empty table cannot have column set to scalar value")
# Make col data shape correct for scalars. The second test is to allow
# broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].
elif (col.shape == () or col.shape[0] == 1) and len(self) > 0:
new_shape = (len(self),) + getattr(col, "shape", ())[1:]
if isinstance(col, np.ndarray):
col = np.broadcast_to(col, shape=new_shape, subok=True)
elif isinstance(col, ShapedLikeNDArray):
col = col._apply(np.broadcast_to, shape=new_shape, subok=True)
# broadcast_to() results in a read-only array. Apparently it only changes
# the view to look like the broadcasted array. So copy.
col = col_copy(col)
name = col.info.name
# Ensure that new column is the right length
if len(self.columns) > 0 and len(col) != len(self):
raise ValueError("Inconsistent data column lengths")
if rename_duplicate:
orig_name = name
i = 1
while name in self.columns:
# Iterate until a unique name is found
name = orig_name + "_" + str(i)
i += 1
col.info.name = name
# Set col parent_table weakref and ensure col has mask attribute if table.masked
self._set_col_parent_table_and_mask(col)
# Add new column as last column
self.columns[name] = col
if index is not None:
# Move the other cols to the right of the new one
move_names = self.colnames[index:-1]
for move_name in move_names:
self.columns.move_to_end(move_name, last=True)
def add_columns(
self, cols, indexes=None, names=None, copy=True, rename_duplicate=False
):
"""
Add a list of new columns to the table using ``cols`` data objects. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
The ``cols`` input can include any data objects which are acceptable as
`~astropy.table.Table` column objects or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
From a performance perspective there is little difference between calling
this method once or looping over the new columns and calling ``add_column()``
for each column.
Parameters
----------
cols : list of object
List of data objects for the new columns
indexes : list of int or None
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
See Also
--------
astropy.table.hstack, update, replace_column
Examples
--------
Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'
and append them to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> col_d = Column(name='d', data=['u', 'v'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],
... indexes=[0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),
... rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([[1, 2], col_b])
>>> t.add_columns([[3, 4], col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
1 u 3 u
2 v 4 v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError("Number of indexes must match number of cols")
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError("Number of names must match number of cols")
default_names = [f"col{ii + len(self.columns)}" for ii in range(len(cols))]
for ii in reversed(np.argsort(indexes, kind="stable")):
self.add_column(
cols[ii],
index=indexes[ii],
name=names[ii],
default_name=default_names[ii],
rename_duplicate=rename_duplicate,
copy=copy,
)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
refcount = None
old_col = None
# sys.getrefcount is CPython specific and not on PyPy.
if (
"refcount" in warns
and name in self.colnames
and hasattr(sys, "getrefcount")
):
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if "always" in warns:
warnings.warn(
f"replaced column '{name}'", TableReplaceWarning, stacklevel=3
)
if "slice" in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = (
"replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name)
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
# sys.getrefcount is CPython specific and not on PyPy.
if "refcount" in warns and hasattr(sys, "getrefcount"):
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = (
"replaced column '{}' and the number of references "
"to the column changed.".format(name)
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if "attributes" in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = "replaced column '{}' and column attributes {} changed.".format(
name, changed_attrs
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col, copy=True):
"""
Replace column ``name`` with the new ``col`` object.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
name : str
Name of column to replace
col : `~astropy.table.Column` or `~numpy.ndarray` or sequence
New column object to replace the existing column.
copy : bool
Make copy of the input ``col``, default=True
See Also
--------
add_columns, astropy.table.hstack, update
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError(f"column name {name} is not in the table")
if self[name].info.indices:
raise ValueError("cannot replace a table index column")
col = self._convert_data_to_col(col, name=name, copy=copy)
self._set_col_parent_table_and_mask(col)
# Ensure that new column is the right length, unless it is the only column
# in which case re-sizing is allowed.
if len(self.columns) > 1 and len(col) != len(self[name]):
raise ValueError("length of new column must match table length")
self.columns.__setitem__(name, col, validated=True)
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice or int or array of int
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, "_groups"):
del self._groups
def iterrows(self, *names):
"""
Iterate over rows of table returning a tuple of values for each row.
This method is especially useful when only a subset of columns are needed.
The ``iterrows`` method can be substantially faster than using the standard
Table row iteration (e.g. ``for row in tbl:``), since that returns a new
``~astropy.table.Row`` object for each row and accessing a column in that
row (e.g. ``row['col0']``) is slower than tuple access.
Parameters
----------
names : list
List of column names (defaults to all columns if no names are provided)
Returns
-------
rows : iterable
Iterator returns tuples of row values
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table({'a': [1, 2, 3],
... 'b': [1.0, 2.5, 3.0],
... 'c': ['x', 'y', 'z']})
To iterate row-wise using column names::
>>> for a, c in t.iterrows('a', 'c'):
... print(a, c)
1 x
2 y
3 z
"""
if len(names) == 0:
names = self.colnames
else:
for name in names:
if name not in self.colnames:
raise ValueError(f"{name} is not a valid column name")
cols = (self[name] for name in names)
out = zip(*cols)
return out
def _set_of_names_in_colnames(self, names):
"""Return ``names`` as a set if valid, or raise a `KeyError`.
``names`` is valid if all elements in it are in ``self.colnames``.
If ``names`` is a string then it is interpreted as a single column
name.
"""
names = {names} if isinstance(names, str) else set(names)
invalid_names = names.difference(self.colnames)
if len(invalid_names) == 1:
raise KeyError(f'column "{invalid_names.pop()}" does not exist')
elif len(invalid_names) > 1:
raise KeyError(f"columns {invalid_names} do not exist")
return names
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : str or iterable of str
Names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same result as using remove_column.
"""
for name in self._set_of_names_in_colnames(names):
del self.columns[name]
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, "utf-8"))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in (
col.info.attr_names - col.info._attrs_no_copy - {"dtype"}
):
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
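Examples
--------
A minimal sketch converting an unnamed bytestring column in place::
>>> import numpy as np
>>> t = Table([np.array(['abc', 'def'], dtype='S')])
>>> t['col0'].dtype.kind
'S'
>>> t.convert_bytestring_to_unicode()
>>> t['col0'].dtype.kind
'U'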
"""
self._convert_string_dtype("S", "U", np.char.decode)
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype("U", "S", np.char.encode)
def keep_columns(self, names):
"""
Keep only the columns specified (remove the others).
Parameters
----------
names : str or iterable of str
The columns to keep. All other columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
"""
names = self._set_of_names_in_colnames(names)
for colname in self.colnames:
if colname not in names:
del self.columns[colname]
def rename_column(self, name, new_name):
"""
Rename a column.
This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
"""
if name not in self.keys():
raise KeyError(f"Column {name} does not exist")
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
"""
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
"""
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError(
"input 'new_names' must be a tuple or a list of column names"
)
if len(names) != len(new_names):
raise ValueError(
"input 'names' and 'new_names' list arguments must be the same length"
)
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError(
"right hand side must be a sequence of values with "
"the same length as the number of selected columns"
)
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
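Examples
--------
Insert a row before index 1 of a small table (values chosen arbitrarily)::
>>> t = Table([[1, 2], [4, 5]], names=('a', 'b'))
>>> t.insert_row(1, [9, 10])
>>> t['a'].tolist()
[1, 9, 2]
>>> t['b'].tolist()
[4, 10, 5]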
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError(
f"Index {index} is out of bounds for table with length {N}"
)
if index < 0:
index += N
if isinstance(vals, Mapping) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not isinstance(mask, Mapping):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError("keys in mask should match keys in vals")
if vals and any(name not in colnames for name in vals):
raise ValueError("Keys in vals must all be valid column names")
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, "dtype"):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError(f"Value must be supplied for column '{name}'")
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError("Mismatch between number of vals and columns")
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError("Mismatch between number of masks and columns")
else:
mask = [False] * len(self.columns)
else:
raise TypeError("Vals must be an iterable or mapping or None")
# Insert val at index for each column
columns = self.TableColumns()
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
try:
# If new val is masked and the existing column does not support masking
# then upgrade the column to a mask-enabled type: either the table-level
# default ColumnClass or else MaskedColumn.
if (
mask_
and isinstance(col, Column)
and not isinstance(col, MaskedColumn)
):
col_cls = (
self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn
)
col = col_cls(col, copy=False)
newcol = col.insert(index, val, axis=0)
if len(newcol) != N + 1:
raise ValueError(
"Incorrect length for column {} after inserting {}"
" (expected {}, got {})".format(name, val, len(newcol), N + 1)
)
newcol.info.parent_table = self
# Set mask if needed and possible
if mask_:
if hasattr(newcol, "mask"):
newcol[index] = np.ma.masked
else:
raise TypeError(
"mask was supplied for column '{}' but it does not "
"support masked values".format(col.info.name)
)
columns[name] = newcol
except Exception as err:
raise ValueError(
"Unable to insert row because of exception in column '{}':\n{}".format(
name, err
)
) from err
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, "_groups"):
del self._groups
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def update(self, other, copy=True):
"""
Perform a dictionary-style update and merge metadata.
The argument ``other`` must be a |Table|, or something that can be used
to initialize a table. Columns from (possibly converted) ``other`` are
added to this table. In case of matching column names the column from
this table is replaced with the one from ``other``.
Parameters
----------
other : table-like
Data to update this table with.
copy : bool
Whether the updated columns should be copies of or references to
the originals.
See Also
--------
add_columns, astropy.table.hstack, replace_column
Examples
--------
Update a table with another table::
>>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0})
>>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2})
>>> t1.update(t2)
>>> t1
<Table length=2>
a b c
str3 float64 float64
---- ------- -------
foo 1.0 7.0
bar 2.0 11.0
>>> t1.meta
{'i': 0, 'n': 2}
Update a table with a dictionary::
>>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]})
>>> t.update({'b': [1., 2.]})
>>> t
<Table length=2>
a b
str3 float64
---- -------
foo 1.0
bar 2.0
"""
from .operations import _merge_table_meta
if not isinstance(other, Table):
other = self.__class__(other, copy=copy)
common_cols = set(self.colnames).intersection(other.colnames)
for name, col in other.items():
if name in common_cols:
self.replace_column(name, col, copy=copy)
else:
self.add_column(col, name=name, copy=copy)
_merge_table_meta(self, [self, other], metadata_conflicts="silent")
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
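Examples
--------
Get the indices that would sort a small table by column 'a' (arbitrary data)::
>>> t = Table({'a': [3, 1, 2]})
>>> idx = t.argsort('a')
>>> idx.tolist()
[1, 2, 0]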
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
idx = np.asarray(index.sorted_data())
return idx[::-1] if reverse else idx
kwargs = {}
if keys:
# For multiple keys return a structured array which gets sorted,
# while for a single key return a single ndarray. Sorting a
# one-column structured array is slower than ndarray (e.g. a
# factor of ~6 for a 10 million long random array), and much slower
# for in principle sortable columns like Time, which get stored as
# object arrays.
if len(keys) > 1:
kwargs["order"] = keys
data = self.as_array(names=keys)
else:
data = self[keys[0]]
else:
# No keys provided so sort on all columns.
data = self.as_array()
if kind:
kwargs["kind"] = kind
# np.argsort will look for a possible .argsort method (e.g., for Time),
# and if that fails cast to an array and try sorting that way.
idx = np.argsort(data, **kwargs)
return idx[::-1] if reverse else idx
def sort(self, keys=None, *, kind=None, reverse=False):
"""
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
"""
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys, kind=kind, reverse=reverse)
with self.index_mode("freeze"):
for name, col in self.columns.items():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
def reverse(self):
"""
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
"""
for col in self.columns.values():
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9836, #9553, and #9536 for discussion.
new_col = col[::-1]
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
for index in self.indices:
index.reverse()
def round(self, decimals=0):
"""
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
decimals : int or dict
Number of decimals to round the columns to. If a dict is given,
the columns will be rounded to the number specified as the value.
Columns not listed in the dict are left unchanged.
"""
if isinstance(decimals, Mapping):
decimal_values = decimals.values()
column_names = decimals.keys()
elif isinstance(decimals, int):
decimal_values = itertools.repeat(decimals)
column_names = self.colnames
else:
raise ValueError("'decimals' argument must be an int or a dict")
for colname, decimal in zip(column_names, decimal_values):
col = self.columns[colname]
if np.issubdtype(col.info.dtype, np.number):
try:
np.around(col, decimals=decimal, out=col)
except TypeError:
# Bug in numpy see https://github.com/numpy/numpy/issues/15438
col[()] = np.around(col, decimals=decimal)
def copy(self, copy_data=True):
"""
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
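Examples
--------
A sketch of the difference between the two modes (arbitrary data)::
>>> t = Table({'a': [1, 2]})
>>> t_ref = t.copy(copy_data=False)  # shares the underlying data
>>> t_ref['a'][0] = 99
>>> t['a'].tolist()
[99, 2]
>>> t_indep = t.copy()  # independent copy of the data
>>> t_indep['a'][0] = 0
>>> t['a'].tolist()
[99, 2]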
"""
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, "_groups"):
out._groups = groups.TableGroups(
out, indices=self._groups._indices, keys=self._groups._keys
)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
return self._rows_equal(other)
def __ne__(self, other):
return ~self.__eq__(other)
def _rows_equal(self, other):
"""
Row-wise comparison of table with any other object.
This is the actual implementation for __eq__.
Returns a 1-D boolean numpy array showing result of row-wise comparison.
This is the same as the ``==`` comparison for tables.
Parameters
----------
other : Table or DataFrame or ndarray
An object to compare with table
Examples
--------
Comparing one Table with other::
>>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t1._rows_equal(t2)
array([ True, True])
"""
if isinstance(other, Table):
other = other.as_array()
if self.has_masked_columns:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def values_equal(self, other):
"""
Element-wise comparison of table with another table, list, or scalar.
Returns a ``Table`` with the same columns containing boolean values
showing result of comparison.
Parameters
----------
other : table-like object or list or scalar
Object to compare with table
Examples
--------
Compare one Table with other::
>>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
>>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
>>> t1.values_equal(t2)
<Table length=2>
a b c
bool bool bool
---- ----- -----
True False False
True True True
"""
if isinstance(other, Table):
names = other.colnames
else:
try:
other = Table(other, copy=False)
names = other.colnames
except Exception:
# Broadcast other into a dict, so e.g. other = 2 will turn into
# other = {'a': 2, 'b': 2} and then equality does a
# column-by-column broadcasting.
names = self.colnames
other = {name: other for name in names}
# Require column names match but do not require same column order
if set(self.colnames) != set(names):
raise ValueError("cannot compare tables with different column names")
eqs = []
for name in names:
try:
np.broadcast(self[name], other[name]) # Check if broadcast-able
# Catch the numpy FutureWarning related to equality checking,
# "elementwise comparison failed; returning scalar instead, but
# in the future will perform elementwise comparison". Turn this
# into an exception since the scalar answer is not what we want.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
eq = self[name] == other[name]
if (
warns
and issubclass(warns[-1].category, FutureWarning)
and "elementwise comparison failed" in str(warns[-1].message)
):
raise FutureWarning(warns[-1].message)
except Exception as err:
raise ValueError(f"unable to compare column {name}") from err
# Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
# broken and completely ignores that it should return an array.
if not (
isinstance(eq, np.ndarray)
and eq.dtype is np.dtype("bool")
and len(eq) == len(self)
):
raise TypeError(
f"comparison for column {name} returned {eq} "
"instead of the expected boolean ndarray"
)
eqs.append(eq)
out = Table(eqs, names=names)
return out
@property
def groups(self):
if not hasattr(self, "_groups"):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``.
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
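Examples
--------
Group a small table by a key column and inspect the groups (arbitrary data)::
>>> t = Table({'key': ['a', 'b', 'a'], 'val': [1, 2, 3]})
>>> tg = t.group_by('key')
>>> len(tg.groups)
2
>>> tg.groups.keys['key'].tolist()
['a', 'b']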
"""
return groups.table_group_by(self, keys)
def to_pandas(self, index=None, use_nullable_int=True):
"""
Return a :class:`pandas.DataFrame` instance.
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify DataFrame index mode
use_nullable_int : bool, default=True
Convert integer MaskedColumn to pandas nullable integer type.
If ``use_nullable_int=False`` or the pandas version does not support
nullable integer types (version < 0.24), then the column is converted
to float with NaN for missing elements and a warning is issued.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 0 days 00:00:03
2002-01-01 2.0 6.0 8.0 0 days 00:03:20
"""
from pandas import DataFrame, Series
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError(
"index must be None, False, True or a table column name"
)
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from astropy.time import TimeBase, TimeDelta
from . import serialize
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = (
col_copy(col, copy_indices=False) if col.info.indices else col
)
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
# Certain subclasses (e.g. TimeSeries) may generate new indices on
# table creation, so make sure there are no indices on the table.
for col in tbl.itercols():
col.info.indices.clear()
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype("timedelta64[ns]")
nat = np.timedelta64("NaT")
else:
new_col = col.datetime64.copy()
nat = np.datetime64("NaT")
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
if badcols:
# fmt: off
raise ValueError(
f'Cannot convert a table with multidimensional columns to a '
f'pandas DataFrame. Offending columns are: {badcols}\n'
f'One can filter out such columns using:\n'
f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
f'tbl[names].to_pandas(...)'
)
# fmt: on
out = OrderedDict()
for name, column in tbl.columns.items():
if getattr(column.dtype, "isnative", True):
out[name] = column
else:
out[name] = column.data.byteswap().newbyteorder("=")
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ["i", "u"]:
pd_dtype = column.dtype.name
if use_nullable_int:
# Convert int64 to Int64, uint32 to UInt32, etc for nullable types
pd_dtype = pd_dtype.replace("i", "I").replace("u", "U")
out[name] = Series(out[name], dtype=pd_dtype)
# If pandas is older than 0.24 the type may have turned to float
if column.dtype.kind != out[name].dtype.kind:
warnings.warn(
f"converted column '{name}' from {column.dtype} to"
f" {out[name].dtype}",
TableReplaceWarning,
stacklevel=3,
)
elif column.dtype.kind not in ["f", "c"]:
out[name] = column.astype(object).filled(np.nan)
kwargs = {}
if index:
idx = out.pop(index)
kwargs["index"] = idx
# We add the table index to Series inputs (MaskedColumn with int values) to override
# its default RangeIndex, see #11432
for v in out.values():
if isinstance(v, Series):
v.index = idx
df = DataFrame(out, **kwargs)
if index:
# Explicitly set the pandas DataFrame index to the original table
# index name.
df.index.name = idx.info.name
return df
@classmethod
def from_pandas(cls, dataframe, index=False, units=None):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance.
In addition to converting generic numeric or string columns, this supports
conversion of pandas Date and Time delta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
units : dict
A dict mapping column names to a `~astropy.units.Unit`.
The columns will have the specified unit in the Table.
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
... print(df)
time dt x
0 1998-01-01 0 days 00:00:01 3.0
1 2002-01-01 0 days 00:05:00 4.0
>>> QTable.from_pandas(df)
<QTable length=2>
time dt x
Time TimeDelta float64
----------------------- --------- -------
1998-01-01T00:00:00.000 1.0 3.0
2002-01-01T00:00:00.000 300.0 4.0
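Pandas nullable integer columns are converted to masked columns, with missing
entries masked. A minimal sketch (the column name ``a`` and the values are
illustrative):
>>> df_int = pd.DataFrame({'a': pd.array([1, None], dtype='Int64')})
>>> t_int = QTable.from_pandas(df_int)  # column 'a' becomes a MaskedColumn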
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
index_name = dataframe.index.name or "index"
while index_name in names:
index_name = "_" + index_name + "_"
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
if units is None:
units = [None] * len(names)
else:
if not isinstance(units, Mapping):
raise TypeError('Expected a Mapping "column-name" -> "unit"')
not_found = set(units.keys()) - set(names)
if not_found:
warnings.warn(f"`units` contains additional columns: {not_found}")
units = [units.get(name) for name in names]
for name, column, data, mask, unit in zip(names, columns, datas, masks, units):
if column.dtype.kind in ["u", "i"] and np.any(mask):
# Special-case support for pandas nullable int
np_dtype = str(column.dtype).lower()
data = np.zeros(shape=column.shape, dtype=np_dtype)
data[~mask] = column[~mask]
out[name] = MaskedColumn(
data=data, name=name, mask=mask, unit=unit, copy=False
)
continue
if data.dtype.kind == "O":
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b""
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
# Numpy datetime64
if data.dtype.kind == "M":
from astropy.time import Time
out[name] = Time(data, format="datetime64")
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = "isot"
# Numpy timedelta64
elif data.dtype.kind == "m":
from astropy.time import TimeDelta
data_sec = data.astype("timedelta64[ns]").astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format="sec")
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)
else:
out[name] = Column(data=data, name=name, unit=unit)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
See also:
- https://docs.astropy.org/en/stable/table/
- https://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
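As a minimal sketch of the unit handling (the column name ``a`` and the values
are illustrative)::
    import astropy.units as u
    from astropy.table import QTable
    t = QTable([[1.0, 2.0] * u.m], names=['a'])
    isinstance(t['a'], u.Quantity)   # True; in a plain Table this would be a Column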
"""
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if isinstance(col, Column) and getattr(col, "unit", None) is not None:
# We need to turn the column into a quantity; use subok=True to allow
# Quantity subclasses identified in the unit (such as u.mag()).
q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity
try:
qcol = q_cls(col.data, col.unit, copy=False, subok=True)
except Exception as exc:
warnings.warn(
f"column {col.info.name} has a unit but is kept as "
f"a {col.__class__.__name__} as an attempt to "
f"convert it to Quantity failed with:\n{exc!r}",
AstropyUserWarning,
)
else:
qcol.info = col.info
qcol.info.indices = col.info.indices
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
|
3046c87b4cfe8aa435a376de20c647d23fa16d9b4fc6996145750415497415c8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import warnings
import weakref
from copy import deepcopy
import numpy as np
from numpy import ma
from astropy.units import Quantity, StructuredUnit, Unit
from astropy.utils.console import color_print
from astropy.utils.data_info import BaseColumnInfo, dtype_info_name
from astropy.utils.metadata import MetaData
from astropy.utils.misc import dtype_bytes_or_chars
from . import groups, pprint
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
class StringTruncateWarning(UserWarning):
"""
Warning class for when a string column is assigned a value
that gets truncated because the base (numpy) string length
is too short.
This does not inherit from AstropyWarning because we want to use
stacklevel=2 to show the user where the issue occurred in their code.
"""
pass
# Always emit this warning, not just the first instance
warnings.simplefilter("always", StringTruncateWarning)
def _auto_names(n_cols):
from . import conf
return [str(conf.auto_colname).format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = {
np.greater,
np.greater_equal,
np.less,
np.less_equal,
np.not_equal,
np.equal,
np.isfinite,
np.isinf,
np.isnan,
np.sign,
np.signbit,
}
def col_copy(col, copy_indices=True):
"""
Mixin-safe version of Column.copy() (with copy_data=True).
Parameters
----------
col : Column or mixin column
Input column
copy_indices : bool
Copy the column ``indices`` attribute
Returns
-------
col : Copy of input column
"""
if isinstance(col, BaseColumn):
return col.copy()
newcol = col.copy() if hasattr(col, "copy") else deepcopy(col)
# If the column has info defined, we copy it and adjust any indices
# to point to the copied column. By guarding with the if statement,
# we avoid side effects (of creating the default info instance).
if "info" in col.__dict__:
newcol.info = col.info
if copy_indices and col.info.indices:
newcol.info.indices = deepcopy(col.info.indices)
for index in newcol.info.indices:
index.replace_col(col, newcol)
return newcol
class FalseArray(np.ndarray):
"""
Boolean mask array that is always False.
This is used to create a stub ``mask`` property which is a boolean array of
``False`` used by default for mixin columns and corresponding to the mixin
column data shape. The ``mask`` looks like a normal numpy array but an
exception will be raised if ``True`` is assigned to any element. The
consequences of the limitation are most obvious in the high-level table
operations.
Parameters
----------
shape : tuple
Data shape
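A minimal illustrative sketch of the intended behavior::
    mask = FalseArray((3,))
    mask[:] = False    # allowed; the array stays all-False
    mask[1] = True     # raises ValueError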
"""
def __new__(cls, shape):
obj = np.zeros(shape, dtype=bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError(
f"Cannot set any element of {type(self).__name__} class to True"
)
def _expand_string_array_for_values(arr, values):
"""
For string-dtype return a version of ``arr`` that is wide enough for ``values``.
If ``arr`` is not string-dtype or does not need expansion then return ``arr``.
Parameters
----------
arr : np.ndarray
Input array
values : scalar or array-like
Values for width comparison for string arrays
Returns
-------
arr_expanded : np.ndarray
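For example, a ``'<U2'`` array is widened when the new values need more
characters (an illustrative sketch)::
    arr = np.array(['ab', 'cd'])                          # dtype '<U2'
    arr2 = _expand_string_array_for_values(arr, 'hello')  # returned copy has dtype '<U5'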
"""
if arr.dtype.kind in ("U", "S") and values is not np.ma.masked:
# Find the length of the longest string in the new values.
values_str_len = np.char.str_len(values).max()
# Determine character repeat count of arr.dtype. Returns a positive
# int or None (something like 'U0' is not possible in numpy). If new values
# are longer than current then make a new (wider) version of arr.
arr_str_len = dtype_bytes_or_chars(arr.dtype)
if arr_str_len and values_str_len > arr_str_len:
arr_dtype = arr.dtype.byteorder + arr.dtype.kind + str(values_str_len)
arr = arr.astype(arr_dtype)
return arr
def _convert_sequence_data_to_array(data, dtype=None):
"""Convert N-d sequence-like data to ndarray or MaskedArray.
This is the core function for converting Python lists or list of lists to a
numpy array. This handles embedded np.ma.masked constants in ``data`` along
with the special case of a homogeneous list of MaskedArray elements.
Considerations:
- np.ma.array is about 50 times slower than np.array for list input. This
function avoids using np.ma.array on list input.
- np.array emits a UserWarning for embedded np.ma.masked, but only for int
or float inputs. For those it converts to np.nan and forces float dtype.
For other types np.array is inconsistent, for instance converting
np.ma.masked to "0.0" for str types.
- Searching in pure Python for np.ma.masked in ``data`` is comparable in
speed to calling ``np.array(data)``.
- This function may end up making two additional copies of input ``data``.
Parameters
----------
data : N-d sequence
Input data, typically list or list of lists
dtype : None or dtype-like
Output datatype (None lets np.array choose)
Returns
-------
np_data : np.ndarray or np.ma.MaskedArray
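A minimal illustrative sketch of the two main cases::
    _convert_sequence_data_to_array([1.0, 2.0, 3.0])        # plain ndarray
    _convert_sequence_data_to_array([1, np.ma.masked, 3])   # MaskedArray with one masked element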
"""
np_ma_masked = np.ma.masked # Avoid repeated lookups of this object
# Special case of a homogeneous list of MaskedArray elements (see #8977).
# np.ma.masked is an instance of MaskedArray, so exclude those values.
if (
hasattr(data, "__len__")
and len(data) > 0
and all(
isinstance(val, np.ma.MaskedArray) and val is not np_ma_masked
for val in data
)
):
np_data = np.ma.array(data, dtype=dtype)
return np_data
# First convert data to a plain ndarray. If there are instances of np.ma.masked
# in the data this will issue a warning for int and float.
with warnings.catch_warnings(record=True) as warns:
# Ensure this warning from numpy is always enabled and that it is not
# converted to an error (which can happen during pytest).
warnings.filterwarnings(
"always", category=UserWarning, message=".*converting a masked element.*"
)
# FutureWarning in numpy 1.21. See https://github.com/astropy/astropy/issues/11291
# and https://github.com/numpy/numpy/issues/18425.
warnings.filterwarnings(
"always",
category=FutureWarning,
message=".*Promotion of numbers and bools to strings.*",
)
try:
np_data = np.array(data, dtype=dtype)
except np.ma.MaskError:
# Catches case of dtype=int with masked values, instead let it
# convert to float
np_data = np.array(data)
except Exception:
# Conversion failed for some reason, e.g. [2, 1*u.m] gives TypeError in Quantity.
# First try to interpret the data as Quantity. If that still fails then fall
# through to object
try:
np_data = Quantity(data, dtype)
except Exception:
dtype = object
np_data = np.array(data, dtype=dtype)
if np_data.ndim == 0 or (np_data.ndim > 0 and len(np_data) == 0):
# Implies input was a scalar or an empty list (e.g. initializing an
# empty table with pre-declared names and dtypes but no data). Here we
# need to fall through to initializing with the original data=[].
return data
# If there were no warnings and the data are int or float, then we are done.
# Other dtypes like string or complex can have masked values and the
# np.array() conversion gives the wrong answer (e.g. converting np.ma.masked
# to the string "0.0").
if len(warns) == 0 and np_data.dtype.kind in ("i", "f"):
return np_data
# Now we need to determine if there is an np.ma.masked anywhere in input data.
# Make a statement like below to look for np.ma.masked in a nested sequence.
# Because np.array(data) succeeded we know that `data` has a regular N-d
# structure. Find ma_masked:
# any(any(any(d2 is ma_masked for d2 in d1) for d1 in d0) for d0 in data)
# Using this eval avoids creating a copy of `data` in the more-usual case of
# no masked elements.
any_statement = "d0 is ma_masked"
for ii in reversed(range(np_data.ndim)):
if ii == 0:
any_statement = f"any({any_statement} for d0 in data)"
elif ii == np_data.ndim - 1:
any_statement = f"any(d{ii} is ma_masked for d{ii} in d{ii-1})"
else:
any_statement = f"any({any_statement} for d{ii} in d{ii-1})"
context = {"ma_masked": np.ma.masked, "data": data}
has_masked = eval(any_statement, context)
# If there are any masks then explicitly change each one to a fill value and
# set a mask boolean array. If not has_masked then we're done.
if has_masked:
mask = np.zeros(np_data.shape, dtype=bool)
data_filled = np.array(data, dtype=object)
# Make type-appropriate fill value based on initial conversion.
if np_data.dtype.kind == "U":
fill = ""
elif np_data.dtype.kind == "S":
fill = b""
else:
# Zero works for every numeric type.
fill = 0
ranges = [range(dim) for dim in np_data.shape]
for idxs in itertools.product(*ranges):
val = data_filled[idxs]
if val is np_ma_masked:
data_filled[idxs] = fill
mask[idxs] = True
elif isinstance(val, bool) and dtype is None:
# If we see a bool and dtype not specified then assume bool for
# the entire array. Not perfect but in most practical cases OK.
# Unfortunately numpy types [False, 0] as int, not bool (and
# [False, np.ma.masked] => array([0.0, np.nan])).
dtype = bool
# If no dtype is provided then need to convert back to list so np.array
# does type autodetection.
if dtype is None:
data_filled = data_filled.tolist()
# Use np.array first to convert `data` to ndarray (fast) and then make
# masked array from an ndarray with mask (fast) instead of from `data`.
np_data = np.ma.array(np.array(data_filled, dtype=dtype), mask=mask)
return np_data
def _make_compare(oper):
"""
Make Column comparison methods which encode the ``other`` object to utf-8
in the case of a bytestring dtype for Py3+.
Parameters
----------
oper : str
Operator name
"""
def _compare(self, other):
op = oper # copy enclosed ref to allow swap below
# If other is a Quantity, we should let it do the work, since
# it can deal with our possible unit (which, for MaskedColumn,
# would get dropped below, as '.data' is accessed in super()).
if isinstance(other, Quantity):
return NotImplemented
# If we are unicode and other is a column with bytes, defer to it for
# doing the unicode sandwich. This avoids problems like those
# discussed in #6838 and #6899.
if (
self.dtype.kind == "U"
and isinstance(other, Column)
and other.dtype.kind == "S"
):
return NotImplemented
# If we are bytes, encode other as needed.
if self.dtype.char == "S":
other = self._encode_str(other)
# Now just let the regular ndarray.__eq__, etc., take over.
result = getattr(super(Column, self), op)(other)
# But we should not return Column instances for this case.
return result.data if isinstance(result, Column) else result
return _compare
class ColumnInfo(BaseColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information.
"""
attr_names = BaseColumnInfo.attr_names | {"groups"}
_attrs_no_copy = BaseColumnInfo._attrs_no_copy | {"groups"}
attrs_from_parent = attr_names
_supports_indexing = True
# For structured columns, data is used to store a dict of columns.
# Store entries in that dict as name.key instead of name.data.key.
_represent_as_dict_primary_data = "data"
def _represent_as_dict(self):
result = super()._represent_as_dict()
names = self._parent.dtype.names
# For a regular column, we are done, but for a structured
# column, we use a SerializedColumns to store the pieces.
if names is None:
return result
from .serialize import SerializedColumn
data = SerializedColumn()
# If this column has a StructuredUnit, we split it and store
# it on the corresponding part. Otherwise, we just store it
# as an attribute below. All other attributes we remove from
# the parts, so that we do not store them multiple times.
# (Note that attributes are not linked to the parent, so it
# is safe to reset them.)
# TODO: deal with (some of) this in Column.__getitem__?
# Alternatively: should we store info on the first part?
# TODO: special-case format somehow? Can we have good formats
# for structured columns?
unit = self.unit
if isinstance(unit, StructuredUnit) and len(unit) == len(names):
units = unit.values()
unit = None # No need to store as an attribute as well.
else:
units = [None] * len(names)
for name, part_unit in zip(names, units):
part = Column(self._parent[name])
part.unit = part_unit
part.description = None
part.meta = {}
part.format = None
data[name] = part
# Create the attributes required to reconstruct the column.
result["data"] = data
# Store the shape if needed. Just like scalar data, a structured data
# column (e.g. with dtype `f8,i8`) can be multidimensional within each
# row and have a shape, and that needs to be distinguished from the
# case that each entry in the structure has the same shape (e.g.,
# distinguish a column with dtype='f8,i8' and 2 elements per row from
# one with dtype '2f8,2i8' and just one element per row).
if shape := self._parent.shape[1:]:
result["shape"] = list(shape)
# Also store the standard info attributes since these are
# stored on the parent and can thus just be passed on as
# arguments. TODO: factor out with essentially the same
# code in serialize._represent_mixin_as_column.
if unit is not None and unit != "":
result["unit"] = unit
if self.format is not None:
result["format"] = self.format
if self.description is not None:
result["description"] = self.description
if self.meta:
result["meta"] = self.meta
return result
def _construct_from_dict(self, map):
if not isinstance(map.get("data"), dict):
return super()._construct_from_dict(map)
# Reconstruct a structured Column, by first making an empty column
# and then filling it with the structured data.
data = map.pop("data")
shape = tuple(map.pop("shape", ()))
# There are three elements in the shape of `part`:
# (table length, shape of structured column, shape of part like '3f8')
# The column `shape` only includes the second, so by adding one to its
# length to include the table length, we pick off a possible last bit.
dtype = np.dtype(
[
(name, part.dtype, part.shape[len(shape) + 1 :])
for name, part in data.items()
]
)
units = tuple(col.info.unit for col in data.values())
if all(unit is not None for unit in units):
map["unit"] = StructuredUnit(units, dtype)
map.update(dtype=dtype, shape=shape, length=len(data[dtype.names[0]]))
# Construct the empty column from `map` (note: 'data' removed above).
result = super()._construct_from_dict(map)
# Fill it with the structured data.
for name in dtype.names:
result[name] = data[name]
return result
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Column instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Column (or subclass)
New instance of this class consistent with ``cols``
"""
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "unit", "format", "description")
)
return self._parent_cls(length=length, **attrs)
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Column this is just the column itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
def __new__(
cls,
data=None,
name=None,
dtype=None,
shape=(),
length=0,
description=None,
unit=None,
format=None,
meta=None,
copy=False,
copy_indices=True,
):
if data is None:
self_data = np.zeros((length,) + shape, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, "_name"):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
unit = unit or data.unit
if format is None:
format = data.format
if meta is None:
meta = data.meta
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = Quantity(data, unit, dtype=dtype, copy=copy).value
# If 'info' has been defined, copy basic properties (if needed).
if "info" in data.__dict__:
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = data.info.meta
else:
if np.dtype(dtype).char == "S":
data = cls._encode_str(data)
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = None if name is None else str(name)
self._parent_table = None
self.unit = unit
self._format = format
self.description = description
self.meta = meta
self.indices = deepcopy(getattr(data, "indices", [])) if copy_indices else []
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def value(self):
"""
An alias for the existing ``data`` attribute.
"""
return self.data
@property
def parent_table(self):
# Note: It seems there are some cases where _parent_table is not set,
# such as after restoring from a pickled Column. Perhaps that should be
# fixed, but this is also okay for now.
if getattr(self, "_parent_table", None) is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order="C", data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# If there is meta on the original column then deepcopy (since "copy" of column
# implies complete independence from original). __array_finalize__ will have already
# made a light copy. I'm not sure how to avoid that initial light copy.
if self.meta is not None:
out.meta = self.meta # MetaData descriptor does a deepcopy here
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
5-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ("_name", "_unit", "_format", "description", "meta", "indices")
attrs = {name: val for name, val in zip(names, state[-1])}
state = state[:-1]
# Using super().__setstate__(state) gives
# "TypeError 'int' object is not iterable", raised in
# astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__()
# Previously, it seems to have given an infinite recursion.
# Hence, manually call the right super class to actually set up
# the array object.
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state)
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
functionality but then add in a 5-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (
self.name,
self.unit,
self.format,
self.description,
self.meta,
self.indices,
)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
def __array_finalize__(self, obj):
# Obj will be none for direct call to Column() creator
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, "indices"): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
if "info" in getattr(obj, "__dict__", {}):
self.info = obj.info
def __array_wrap__(self, out_arr, context=None):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
For this case, we use "[()]" to select everything, and to ensure we
convert a zero rank array to a scalar. (For some reason np.sum()
returns a zero rank scalar array while np.mean() returns a scalar,
so the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
out_arr = super().__array_wrap__(out_arr, context)
if self.shape != out_arr.shape or (
isinstance(out_arr, BaseColumn)
and (context is not None and context[0] in _comparison_functions)
):
return out_arr.data[()]
else:
return out_arr
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val):
if val is not None:
val = str(val)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def format(self):
"""
Format string for displaying values in this column.
"""
return self._format
@format.setter
def format(self, format_string):
prev_format = getattr(self, "_format", None)
self._format = format_string # set new format string
try:
# test whether it formats without error exemplarily
self.pformat(max_lines=1)
except Exception as err:
# revert to restore previous format if there was one
self._format = prev_format
raise ValueError(
"Invalid format for column '{}': could not display "
"values in this column using this format".format(self.name)
) from err
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
yield from _pformat_col_iter(
self, -1, show_name=False, show_unit=False, show_dtype=False, outs={}
)
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : bool
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError("Comparison `col` must be a Column or MaskedColumn object")
attrs = ("name", "unit", "dtype", "format", "description", "meta")
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(
self,
max_lines=None,
show_name=True,
show_unit=False,
show_dtype=False,
html=False,
):
"""Return a list of formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
html : bool
Format the output as an HTML table. Default is False.
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(
self,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
html=html,
)
return lines
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(
self,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
n_header = outs["n_header"]
for i, line in enumerate(lines):
if i < n_header:
color_print(line, "red")
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(
self, max_lines=max_lines, show_name=show_name, show_unit=show_unit
)
@property
def unit(self):
"""
The unit associated with this column. May be a string or a
`astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict="silent")
@unit.deleter
def unit(self):
self._unit = None
def searchsorted(self, v, side="left", sorter=None):
# For bytes type data, encode the `v` value as UTF-8 (if necessary) before
# calling searchsorted. This prevents a factor of 1000 slowdown in
# searchsorted in this case.
a = self.data
if a.dtype.kind == "S" and not isinstance(v, bytes):
v = np.asarray(v)
if v.dtype.kind == "U":
v = np.char.encode(v, "utf-8")
return np.searchsorted(a, v, side=side, sorter=sorter)
searchsorted.__doc__ = np.ndarray.searchsorted.__doc__
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
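A minimal illustrative sketch (the column name, values and units are made up)::
    col = Column([1.0, 2.0], name='d', unit='m')
    col.convert_unit_to('cm')    # col.data is now [100., 200.] and col.unit is 'cm'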
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
@property
def groups(self):
if not hasattr(self, "_groups"):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``.
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
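A minimal illustrative sketch (the keys and values are made up)::
    c = Column([1, 2, 3, 4], name='a')
    keys = np.array(['b', 'a', 'b', 'a'])
    cg = c.group_by(keys)    # copy of c sorted by keys, with cg.groups available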
"""
return groups.column_group_by(self, keys)
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``.
"""
if self.parent_table:
if hasattr(self.parent_table, "_groups"):
out._groups = groups.ColumnGroups(
out, indices=self.parent_table._groups._indices
)
elif hasattr(self, "_groups"):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(
self, self.unit, copy=False, dtype=self.dtype, order="A", subok=True
)
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : unit-like
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of tuple
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
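A minimal illustrative sketch (the column name, values and units are made up)::
    col = Column([1000.0, 2000.0], name='d', unit='m')
    q = col.to('km')    # Quantity with values [1., 2.] and unit 'km'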
"""
return self.quantity.to(unit, equivalencies)
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self.
"""
for attr in ("name", "unit", "_format", "description"):
val = getattr(obj, attr, None)
setattr(self, attr, val)
# Light copy of meta if it is not empty
obj_meta = getattr(obj, "meta", None)
if obj_meta:
self.meta = obj_meta.copy()
@staticmethod
def _encode_str(value):
"""
Encode anything that is unicode-ish as utf-8. This method is only
called for Py3+.
"""
if isinstance(value, str):
value = value.encode("utf-8")
elif isinstance(value, bytes) or value is np.ma.masked:
pass
else:
arr = np.asarray(value)
if arr.dtype.char == "U":
arr = np.char.encode(arr, encoding="utf-8")
if isinstance(value, np.ma.MaskedArray):
arr = np.ma.array(arr, mask=value.mask, copy=False)
value = arr
return value
def tolist(self):
if self.dtype.kind == "S":
return np.chararray.decode(self, encoding="utf-8").tolist()
else:
return super().tolist()
class Column(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray, or None
Column data values
name : str
Column name and key for reference within Table
dtype : `~numpy.dtype`-like
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str, None, or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
To access the ``Column`` data as a raw `numpy.ndarray` object, you can use
one of the ``data`` or ``value`` attributes (which are equivalent)::
col.data
col.value
"""
def __new__(
cls,
data=None,
name=None,
dtype=None,
shape=(),
length=0,
description=None,
unit=None,
format=None,
meta=None,
copy=False,
copy_indices=True,
):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError(
"Cannot convert a MaskedColumn with masked value to a Column"
)
self = super().__new__(
cls,
data=data,
name=name,
dtype=dtype,
shape=shape,
length=length,
description=description,
unit=unit,
format=format,
meta=meta,
copy=copy,
copy_indices=copy_indices,
)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError(
"cannot set mask value to a column in non-masked Table"
)
super().__setattr__(item, value)
if item == "unit" and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (
("name", self.name),
("dtype", dtype_info_name(self.dtype)),
("shape", shape),
("unit", unit),
("format", self.format),
("description", self.description),
("length", len(self)),
):
if val is not None:
descr_vals.append(f"{attr}={val!r}")
descr = "<" + " ".join(descr_vals) + ">\n"
if html:
from astropy.utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html
)
out = descr + "\n".join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return "\n".join(lines)
def __bytes__(self):
return str(self).encode("utf-8")
def _check_string_truncate(self, value):
"""
Emit a warning if any elements of ``value`` will be truncated when
``value`` is assigned to self.
"""
# Convert input ``value`` to the string dtype of this column and
# find the length of the longest string in the array.
value = np.asanyarray(value, dtype=self.dtype.type)
if value.size == 0:
return
value_str_len = np.char.str_len(value).max()
# Parse the array-protocol typestring (e.g. '|U15') of self.dtype which
# has the character repeat count on the right side.
self_str_len = dtype_bytes_or_chars(self.dtype)
if value_str_len > self_str_len:
warnings.warn(
"truncated right side string(s) longer than {} "
"character(s) during assignment".format(self_str_len),
StringTruncateWarning,
stacklevel=3,
)
def __setitem__(self, index, value):
if self.dtype.char == "S":
value = self._encode_str(value)
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
__eq__ = _make_compare("__eq__")
__ne__ = _make_compare("__ne__")
__gt__ = _make_compare("__gt__")
__lt__ = _make_compare("__lt__")
__ge__ = _make_compare("__ge__")
__le__ = _make_compare("__le__")
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different from
that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.Column`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new column is returned.
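A minimal illustrative sketch (the column name and values are made up)::
    col = Column([1, 2, 3], name='a')
    new_col = col.insert(1, 10)    # Column with values [1, 10, 2, 3]; col is unchanged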
"""
if self.dtype.kind == "O":
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=axis)
data[obj] = values
else:
self_for_insert = _expand_string_array_for_values(self, values)
data = np.insert(self_for_insert, obj, values, axis=axis)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
class MaskedColumnInfo(ColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information. In this case
it just adds the ``mask_val`` attribute.
"""
# Add `serialize_method` attribute to the attrs that MaskedColumnInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. See also code below.
attr_names = ColumnInfo.attr_names | {"serialize_method"}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = "data"
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {
"fits": "null_value",
"ecsv": "null_value",
"hdf5": "data_mask",
"parquet": "data_mask",
None: "null_value",
}
def _represent_as_dict(self):
out = super()._represent_as_dict()
# If we are a structured masked column, then our parent class,
# ColumnInfo, will already have set up a dict with masked parts,
# which will be serialized later, so no further work needed here.
if self._parent.dtype.names is not None:
return out
col = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == "data_mask":
# Note: a driver here is a performance issue in #8443 where repr() of a
# np.ma.MaskedArray value is up to 10 times slower than repr of a normal array
# value. So regardless of whether there are masked elements it is useful to
# explicitly define this as a serialized column and use col.data.data (ndarray)
# instead of letting it fall through to the "standard" serialization machinery.
out["data"] = col.data.data
if np.any(col.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out["mask"] = col.mask
elif method == "null_value":
pass
else:
raise ValueError(
'serialize method must be either "data_mask" or "null_value"'
)
return out
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
"""Define a masked data column for use in a Table object.
Parameters
----------
data : list, ndarray, or None
Column data values
name : str
Column name and key for reference within Table
mask : list, ndarray or None
Boolean mask for which True indicates missing or invalid data
fill_value : float, int, str, or None
Value used when filling masked column elements
dtype : `~numpy.dtype`-like
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str, None, or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A MaskedColumn is similar to a Column except that it includes ``mask`` and
``fill_value`` attributes. It can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = MaskedColumn(data=[1, 2], name='name')
col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)
The ``mask`` argument will be cast as a boolean array and specifies
which elements are considered to be missing or invalid.
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``. When ``data`` is provided then the ``shape``
and ``length`` arguments are ignored.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = MaskedColumn(name='name', length=5)
col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
To access the ``Column`` data as a raw `numpy.ma.MaskedArray` object, you can
use one of the ``data`` or ``value`` attributes (which are equivalent)::
col.data
col.value
"""
info = MaskedColumnInfo()
def __new__(
cls,
data=None,
name=None,
mask=None,
fill_value=None,
dtype=None,
shape=(),
length=0,
description=None,
unit=None,
format=None,
meta=None,
copy=False,
copy_indices=True,
):
if mask is None:
# If mask is None then we need to determine the mask (if any) from the data.
# The naive method is looking for a mask attribute on data, but this can fail,
# see #8816. Instead use ``MaskedArray`` to do the work.
mask = ma.MaskedArray(data).mask
if mask is np.ma.nomask:
# Handle odd-ball issue with np.ma.nomask (numpy #13758), and see below.
mask = False
elif copy:
mask = mask.copy()
elif mask is np.ma.nomask:
# Force the creation of a full mask array as nomask is tricky to
# use and will fail in an unexpected manner when setting a value
# to the mask.
mask = False
else:
mask = deepcopy(mask)
# Create self using MaskedArray as a wrapper class, following the example of
# class MSubArray in
# https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
# This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
# https://github.com/astropy/astropy/commit/ff6039e8)
# First just pass through all args and kwargs to BaseColumn, then wrap that object
# with MaskedArray.
self_data = BaseColumn(
data,
dtype=dtype,
shape=shape,
length=length,
name=name,
unit=unit,
format=format,
description=description,
meta=meta,
copy=copy,
copy_indices=copy_indices,
)
self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
# The above process preserves info relevant for Column, but this does
# not include serialize_method (and possibly other future attributes)
# relevant for MaskedColumn, so we set info explicitly.
if "info" in getattr(data, "__dict__", {}):
self.info = data.info
# Note: do not set fill_value in the MaskedArray constructor because this does not
# go through the fill_value workarounds.
if fill_value is None:
data_fill_value = getattr(data, "fill_value", None)
if (
data_fill_value is not None
and data_fill_value != np.ma.default_fill_value(data.dtype)
):
fill_value = np.array(data_fill_value, self.dtype)[()]
self.fill_value = fill_value
self.parent_table = None
# needs to be done here since self doesn't come from BaseColumn.__new__
for index in self.indices:
index.replace_col(self_data, self)
return self
@property
def fill_value(self):
return self.get_fill_value() # defer to native ma.MaskedArray method
@fill_value.setter
def fill_value(self, val):
"""Set fill value both in the masked column view and in the parent table
if it exists. Setting one or the other alone doesn't work.
"""
# another ma bug workaround: If the value of fill_value for a string array is
# requested but not yet set then it gets created as 'N/A'. From this point onward
# any new fill_values are truncated to 3 characters. Note that this does not
# occur if the masked array is a structured array (as in the previous block that
# deals with the parent table).
#
# >>> x = ma.array(['xxxx'])
# >>> x.fill_value # fill_value now gets represented as an 'S3' array
# 'N/A'
# >>> x.fill_value='yyyy'
# >>> x.fill_value
# 'yyy'
#
# To handle this we are forced to reset a private variable first:
self._fill_value = None
self.set_fill_value(val) # defer to native ma.MaskedArray method
@property
def data(self):
"""The plain MaskedArray data held by this column."""
out = self.view(np.ma.MaskedArray)
# By default, a MaskedArray view will set the _baseclass to be the
# same as that of our own class, i.e., BaseColumn. Since we want
# to return a plain MaskedArray, we reset the baseclass accordingly.
out._baseclass = np.ndarray
return out
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled with a given value.
Parameters
----------
fill_value : scalar; optional
The value to use for invalid entries (`None` by default). If
`None`, the ``fill_value`` attribute of the array is used
instead.
Returns
-------
filled_column : Column
A copy of ``self`` with masked entries replaced by `fill_value`
(be it the function argument or the attribute of ``self``).
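A minimal illustrative sketch (the column name, values and mask are made up)::
    col = MaskedColumn([1, 2, 3], name='a', mask=[False, True, False])
    col.filled(99)    # plain Column with values [1, 99, 3]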
"""
if fill_value is None:
fill_value = self.fill_value
data = super().filled(fill_value)
# Use parent table definition of Column if available
column_cls = (
self.parent_table.Column if (self.parent_table is not None) else Column
)
out = column_cls(
name=self.name,
data=data,
unit=self.unit,
format=self.format,
description=self.description,
meta=deepcopy(self.meta),
)
return out
def insert(self, obj, values, mask=None, axis=0):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.table.MaskedColumn` object.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different from
that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
mask : bool or array-like
Mask value(s) to insert. If not supplied, and values does not have
a mask either, then False is used.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.MaskedColumn`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new masked column is returned.
"""
self_ma = self.data # self viewed as MaskedArray
if self.dtype.kind == "O":
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
new_data = np.insert(self_ma.data, obj, None, axis=axis)
new_data[obj] = values
else:
self_ma = _expand_string_array_for_values(self_ma, values)
new_data = np.insert(self_ma.data, obj, values, axis=axis)
if mask is None:
mask = getattr(values, "mask", np.ma.nomask)
if mask is np.ma.nomask:
if self.dtype.kind == "O":
mask = False
else:
mask = np.zeros(np.shape(values), dtype=bool)
new_mask = np.insert(self_ma.mask, obj, mask, axis=axis)
new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
out = new_ma.view(self.__class__)
out.parent_table = None
out.indices = []
out._copy_attrs(self)
out.fill_value = self.fill_value
return out
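# Illustrative usage sketch (hypothetical column ``c``; insertion is not in-place):
# >>> c = MaskedColumn([1, 2, 4], name='c', mask=[False, True, False])
# >>> c2 = c.insert(2, 3, mask=True)  # insert a masked value before index 2
# >>> len(c2)
# 4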
def _copy_attrs_slice(self, out):
# Fixes issue #3023: when calling getitem with a MaskedArray subclass
# the original object attributes are not copied.
if out.__class__ is self.__class__:
# TODO: this part is essentially the same as what is done in
# __array_finalize__ and could probably be called directly in our
# override of __getitem__ in _columns_mixins.pyx. Refactor?
if "info" in self.__dict__:
out.info = self.info
out.parent_table = None
# we need this because __getitem__ does a shallow copy of indices
if out.indices is self.indices:
out.indices = []
out._copy_attrs(self)
return out
def __setitem__(self, index, value):
# Issue warning for string assignment that truncates ``value``
if self.dtype.char == "S":
value = self._encode_str(value)
if issubclass(self.dtype.type, np.character):
# Account for a bug in np.ma.MaskedArray setitem.
# https://github.com/numpy/numpy/issues/8624
value = np.ma.asanyarray(value, dtype=self.dtype.type)
# Check for string truncation after filling masked items with
# empty (zero-length) string. Note that filled() does not make
# a copy if there are no masked items.
self._check_string_truncate(value.filled(""))
# update indices
self.info.adjust_indices(index, value, len(self))
ma.MaskedArray.__setitem__(self, index, value)
# We do this to make the methods show up in the API docs
name = BaseColumn.name
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
|
d1e43ddf88b4898658d29210aeca47eacc5c8402dc234591e31c990980edcac4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import fnmatch
import os
import re
import sys
import numpy as np
from astropy import log
from astropy.utils.console import Getch, color_print, conf, terminal_size
from astropy.utils.data_info import dtype_info_name
__all__ = []
def default_format_func(format_, val):
if isinstance(val, bytes):
return val.decode("utf-8", errors="replace")
else:
return str(val)
# The first three functions are helpers for _auto_format_func
def _use_str_for_masked_values(format_func):
"""Wrap format function to trap masked values.
String format functions and most user functions will not be able to deal
with masked values, so we wrap them to ensure they are passed to str().
"""
return lambda format_, val: (
str(val) if val is np.ma.masked else format_func(format_, val)
)
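# Illustrative sketch of the wrapper above: the wrapped function falls back to
# str() when the value is np.ma.masked instead of failing inside the format call.
# >>> fmt = _use_str_for_masked_values(lambda format_, val: format(val, format_))
# >>> fmt('.2f', 1.2345)
# '1.23'
# >>> fmt('.2f', np.ma.masked)  # str(np.ma.masked) is '--'
# '--'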
def _possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
"""
yield lambda format_, val: format(val, format_)
yield lambda format_, val: format_.format(val)
yield lambda format_, val: format_ % val
yield lambda format_, val: format_.format(**{k: val[k] for k in val.dtype.names})
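# Illustrative sketch of the three string-derived styles yielded above (the
# structured-array form is omitted), shown with plain built-in formatting:
# >>> format(1.2345, '.2f')    # format-spec for the format() built-in
# '1.23'
# >>> '{:.2f}'.format(1.2345)  # new-style format string
# '1.23'
# >>> '%.2f' % 1.2345          # old-style format string
# '1.23'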
def get_auto_format_func(
col=None, possible_string_format_functions=_possible_string_format_functions
):
"""
Return a wrapped ``auto_format_func`` function which is used in
formatting table columns. This is primarily an internal function but
gets used directly in other parts of astropy, e.g. `astropy.io.ascii`.
Parameters
----------
col : object, optional
Column object whose ``info`` is used to cache the chosen format function. Default is None.
possible_string_format_functions : func, optional
Function that yields possible string formatting functions
(defaults to internal function to do this).
Returns
-------
Wrapped ``auto_format_func`` function
"""
def _auto_format_func(format_, val):
"""Format ``val`` according to ``format_`` for a plain format specifier,
old- or new-style format strings, or using a user supplied function.
More importantly, determine and cache (in _format_funcs) a function
that will do this subsequently. In this way this complicated logic is
only done for the first value.
Returns the formatted value.
"""
if format_ is None:
return default_format_func(format_, val)
if format_ in col.info._format_funcs:
return col.info._format_funcs[format_](format_, val)
if callable(format_):
format_func = lambda format_, val: format_(val)
try:
out = format_func(format_, val)
if not isinstance(out, str):
raise ValueError(
"Format function for value {} returned {} "
"instead of string type".format(val, type(val))
)
except Exception as err:
# For a masked element, the format function call likely failed
# to handle it. Just return the string representation for now,
# and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
raise ValueError(f"Format function for value {val} failed.") from err
# If the user-supplied function handles formatting masked elements, use
# it directly. Otherwise, wrap it in a function that traps them.
try:
format_func(format_, np.ma.masked)
except Exception:
format_func = _use_str_for_masked_values(format_func)
else:
# For a masked element, we cannot set string-based format functions yet,
# as all tests below will fail. Just return the string representation
# of masked for now, and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
for format_func in possible_string_format_functions(format_):
try:
# Does this string format method work?
out = format_func(format_, val)
# Require that the format statement actually did something.
if out == format_:
raise ValueError("the format passed in did nothing.")
except Exception:
continue
else:
break
else:
# None of the possible string functions passed muster.
raise ValueError(
f"unable to parse format string {format_} for its column."
)
# String-based format functions will fail on masked elements;
# wrap them in a function that traps them.
format_func = _use_str_for_masked_values(format_func)
col.info._format_funcs[format_] = format_func
return out
return _auto_format_func
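# Illustrative usage sketch (assumes a plain Column, whose ``info`` provides the
# per-column ``_format_funcs`` cache used above):
# >>> from astropy.table import Column
# >>> c = Column([1.2345, 2.5], name='x')
# >>> auto_func = get_auto_format_func(c)
# >>> auto_func('{:.2f}', c[0])  # probes candidate format functions, caches the winner
# '1.23'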
def _get_pprint_include_names(table):
"""Get the set of names to show in pprint from the table pprint_include_names
and pprint_exclude_names attributes.
These may be fnmatch unix-style globs.
"""
def get_matches(name_globs, default):
match_names = set()
if name_globs: # For None or () use the default
for name in table.colnames:
for name_glob in name_globs:
if fnmatch.fnmatch(name, name_glob):
match_names.add(name)
break
else:
match_names.update(default)
return match_names
include_names = get_matches(table.pprint_include_names(), table.colnames)
exclude_names = get_matches(table.pprint_exclude_names(), [])
return include_names - exclude_names
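# Illustrative sketch (hypothetical table; assumes the public pprint_exclude_names
# attribute accepts unix-style globs as described above):
# >>> from astropy.table import Table
# >>> t = Table({'ra': [1.0], 'dec': [2.0], 'flag': [0]})
# >>> t.pprint_exclude_names = ['fl*']
# >>> sorted(_get_pprint_include_names(t))
# ['dec', 'ra']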
class TableFormatter:
@staticmethod
def _get_pprint_size(max_lines=None, max_width=None):
"""Get the output size (number of lines and character width) for Column and
Table pformat/pprint methods.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be determined
using the ``astropy.table.conf.max_lines`` configuration item. If a
negative value of ``max_lines`` is supplied then there is no line
limit applied.
The same applies for max_width except the configuration item is
``astropy.table.conf.max_width``.
Parameters
----------
max_lines : int or None
Maximum lines of output (header + data rows)
max_width : int or None
Maximum width (characters) output
Returns
-------
max_lines, max_width : int
"""
# Declare to keep static type checker happy.
lines = None
width = None
if max_lines is None:
max_lines = conf.max_lines
if max_width is None:
max_width = conf.max_width
if max_lines is None or max_width is None:
lines, width = terminal_size()
if max_lines is None:
max_lines = lines
elif max_lines < 0:
max_lines = sys.maxsize
if max_lines < 8:
max_lines = 8
if max_width is None:
max_width = width
elif max_width < 0:
max_width = sys.maxsize
if max_width < 10:
max_width = 10
return max_lines, max_width
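# Illustrative sketch of the clipping behaviour described in the docstring:
# a negative limit disables the cap, while tiny limits are raised to the minimum.
# >>> ml, mw = TableFormatter._get_pprint_size(max_lines=-1, max_width=5)
# >>> ml == sys.maxsize, mw
# (True, 10)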
def _pformat_col(
self,
col,
max_lines=None,
show_name=True,
show_unit=None,
show_dtype=False,
show_length=None,
html=False,
align=None,
):
"""Return a list of formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
html : bool
Output column as HTML
align : str
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively.
Returns
-------
lines : list
List of lines with formatted column values
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
if show_unit is None:
show_unit = col.info.unit is not None
outs = {} # Some values from _pformat_col_iter iterator that are needed here
col_strs_iter = self._pformat_col_iter(
col,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
show_length=show_length,
outs=outs,
)
# Replace tab and newline with text representations so they display nicely.
# Newline in particular is a problem in a multicolumn table.
col_strs = [
val.replace("\t", "\\t").replace("\n", "\\n") for val in col_strs_iter
]
if len(col_strs) > 0:
col_width = max(len(x) for x in col_strs)
if html:
from astropy.utils.xml.writer import xml_escape
n_header = outs["n_header"]
for i, col_str in enumerate(col_strs):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = "th" if i < n_header else "td"
val = f"<{td}>{xml_escape(col_str.strip())}</{td}>"
row = "<tr>" + val + "</tr>"
if i < n_header:
row = "<thead>" + row + "</thead>"
col_strs[i] = row
if n_header > 0:
# Get rid of '---' header line
col_strs.pop(n_header - 1)
col_strs.insert(0, "<table>")
col_strs.append("</table>")
# Now bring all the column string values to the same fixed width
else:
col_width = max(len(x) for x in col_strs) if col_strs else 1
# Center line header content and generate dashed headerline
for i in outs["i_centers"]:
col_strs[i] = col_strs[i].center(col_width)
if outs["i_dashes"] is not None:
col_strs[outs["i_dashes"]] = "-" * col_width
# Format columns according to alignment. `align` arg has precedence, otherwise
# use `col.format` if it starts as a legal alignment string. If neither applies
# then right justify.
re_fill_align = re.compile(r"(?P<fill>.?)(?P<align>[<^>=])")
match = None
if align:
# If there is an align specified then it must match
match = re_fill_align.match(align)
if not match:
raise ValueError(
"column align must be one of '<', '^', '>', or '='"
)
elif isinstance(col.info.format, str):
# col.info.format need not match, in which case rjust gets used
match = re_fill_align.match(col.info.format)
if match:
fill_char = match.group("fill")
align_char = match.group("align")
if align_char == "=":
if fill_char != "0":
raise ValueError("fill character must be '0' for '=' align")
# str.zfill gets used which does not take fill char arg
fill_char = ""
else:
fill_char = ""
align_char = ">"
justify_methods = {"<": "ljust", "^": "center", ">": "rjust", "=": "zfill"}
justify_method = justify_methods[align_char]
justify_args = (col_width, fill_char) if fill_char else (col_width,)
for i, col_str in enumerate(col_strs):
col_strs[i] = getattr(col_str, justify_method)(*justify_args)
if outs["show_length"]:
col_strs.append(f"Length = {len(col)} rows")
return col_strs, outs
def _name_and_structure(self, name, dtype, sep=" "):
"""Format a column name, including a possible structure.
Normally, just returns the name, but if it has a structured dtype,
will add the parts in between square brackets. E.g.,
"name [f0, f1]" or "name [f0[sf0, sf1], f1]".
"""
if dtype is None or dtype.names is None:
return name
structure = ", ".join(
[
self._name_and_structure(name, dt, sep="")
for name, (dt, _) in dtype.fields.items()
]
)
return f"{name}{sep}[{structure}]"
def _pformat_col_iter(
self,
col,
max_lines,
show_name,
show_unit,
outs,
show_dtype=False,
show_length=None,
):
"""Iterator which yields formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
outs : dict
Must be a dict which is used to pass back additional values
defined within the iterator.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
"""
max_lines, _ = self._get_pprint_size(max_lines, -1)
dtype = getattr(col, "dtype", None)
multidims = getattr(col, "shape", [0])[1:]
if multidims:
multidim0 = tuple(0 for n in multidims)
multidim1 = tuple(n - 1 for n in multidims)
multidims_all_ones = np.prod(multidims) == 1
multidims_has_zero = 0 in multidims
i_dashes = None
i_centers = [] # Line indexes where content should be centered
n_header = 0
if show_name:
i_centers.append(n_header)
# Get column name (or 'None' if not set)
col_name = str(col.info.name)
n_header += 1
yield self._name_and_structure(col_name, dtype)
if show_unit:
i_centers.append(n_header)
n_header += 1
yield str(col.info.unit or "")
if show_dtype:
i_centers.append(n_header)
n_header += 1
if dtype is not None:
col_dtype = dtype_info_name((dtype, multidims))
else:
col_dtype = col.__class__.__qualname__ or "object"
yield col_dtype
if show_unit or show_name or show_dtype:
i_dashes = n_header
n_header += 1
yield "---"
max_lines -= n_header
n_print2 = max_lines // 2
n_rows = len(col)
# This block of code is responsible for producing the function that
# will format values for this column. The ``format_func`` function
# takes two args (col_format, val) and returns the string-formatted
# version. Some points to understand:
#
# - col_format could itself be the formatting function, so it will
# actually end up being called with itself as the first arg. In
# this case the function is expected to ignore its first arg.
#
# - auto_format_func is a function that gets called on the first
# column value that is being formatted. It then determines an
# appropriate formatting function given the actual value to be
# formatted. This might be deterministic or it might involve
# try/except. The latter allows for different string formatting
# options like %f or {:5.3f}. When auto_format_func is called it:
# 1. Caches the function in the _format_funcs dict so for subsequent
# values the right function is called right away.
# 2. Returns the formatted value.
#
# - possible_string_format_functions is a function that yields a
# succession of functions that might successfully format the
# value. There is a default, but Mixin methods can override this.
# See Quantity for an example.
#
# - get_auto_format_func() returns a wrapped version of auto_format_func
# with the column id and possible_string_format_functions as
# enclosed variables.
col_format = col.info.format or getattr(col.info, "default_format", None)
pssf = (
getattr(col.info, "possible_string_format_functions", None)
or _possible_string_format_functions
)
auto_format_func = get_auto_format_func(col, pssf)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
if len(col) > max_lines:
if show_length is None:
show_length = True
i0 = n_print2 - (1 if show_length else 0)
i1 = n_rows - n_print2 - max_lines % 2
indices = np.concatenate(
[np.arange(0, i0 + 1), np.arange(i1 + 1, len(col))]
)
else:
i0 = -1
indices = np.arange(len(col))
def format_col_str(idx):
if multidims:
# Prevents columns like Column(data=[[(1,)],[(2,)]], name='a')
# with shape (n,1,...,1) from being printed as if there was
# more than one element in a row
if multidims_all_ones:
return format_func(col_format, col[(idx,) + multidim0])
elif multidims_has_zero:
# Any zero dimension means there is no data to print
return ""
else:
left = format_func(col_format, col[(idx,) + multidim0])
right = format_func(col_format, col[(idx,) + multidim1])
return f"{left} .. {right}"
else:
return format_func(col_format, col[idx])
# Add formatted values if within bounds allowed by max_lines
for idx in indices:
if idx == i0:
yield "..."
else:
try:
yield format_col_str(idx)
except ValueError:
raise ValueError(
'Unable to parse format string "{}" for entry "{}" '
'in column "{}"'.format(col_format, col[idx], col.info.name)
)
outs["show_length"] = show_length
outs["n_header"] = n_header
outs["i_centers"] = i_centers
outs["i_dashes"] = i_dashes
def _pformat_table(
self,
table,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
tableclass=None,
align=None,
):
"""Return a list of lines for the formatted string representation of
the table.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(table)
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
None
align : str or list or tuple
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively. A list of strings can be
provided for alignment of tables with multiple columns.
Returns
-------
rows : list
Formatted table as a list of strings
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
# "Print" all the values into temporary lists by column for subsequent
# use and to determine the width
max_lines, max_width = self._get_pprint_size(max_lines, max_width)
if show_unit is None:
show_unit = any(col.info.unit for col in table.columns.values())
# Coerce align into a correctly-sized list of alignments (if possible)
n_cols = len(table.columns)
if align is None or isinstance(align, str):
align = [align] * n_cols
elif isinstance(align, (list, tuple)):
if len(align) != n_cols:
raise ValueError(
"got {} alignment values instead of "
"the number of columns ({})".format(len(align), n_cols)
)
else:
raise TypeError(
f"align keyword must be str or list or tuple (got {type(align)})"
)
# Process column visibility from table pprint_include_names and
# pprint_exclude_names attributes and get the set of columns to show.
pprint_include_names = _get_pprint_include_names(table)
cols = []
outs = None # Initialize so static type checker is happy
for align_, col in zip(align, table.columns.values()):
if col.info.name not in pprint_include_names:
continue
lines, outs = self._pformat_col(
col,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
align=align_,
)
if outs["show_length"]:
lines = lines[:-1]
cols.append(lines)
if not cols:
return ["<No columns>"], {"show_length": False}
# Use the values for the last column since they are all the same
n_header = outs["n_header"]
n_rows = len(cols[0])
def outwidth(cols):
return sum(len(c[0]) for c in cols) + len(cols) - 1
dots_col = ["..."] * n_rows
middle = len(cols) // 2
while outwidth(cols) > max_width:
if len(cols) == 1:
break
if len(cols) == 2:
cols[1] = dots_col
break
if cols[middle] is dots_col:
cols.pop(middle)
middle = len(cols) // 2
cols[middle] = dots_col
# Now "print" the (already-stringified) column values into a
# row-oriented list.
rows = []
if html:
from astropy.utils.xml.writer import xml_escape
if tableid is None:
tableid = f"table{id(table)}"
if tableclass is not None:
if isinstance(tableclass, list):
tableclass = " ".join(tableclass)
rows.append(f'<table id="{tableid}" class="{tableclass}">')
else:
rows.append(f'<table id="{tableid}">')
for i in range(n_rows):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = "th" if i < n_header else "td"
vals = (f"<{td}>{xml_escape(col[i].strip())}</{td}>" for col in cols)
row = "<tr>" + "".join(vals) + "</tr>"
if i < n_header:
row = "<thead>" + row + "</thead>"
rows.append(row)
rows.append("</table>")
else:
for i in range(n_rows):
row = " ".join(col[i] for col in cols)
rows.append(row)
return rows, outs
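# Illustrative usage sketch via the public Table.pformat/pprint wrappers
# (hypothetical table; exact output omitted since it depends on column widths):
# >>> from astropy.table import Table
# >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
# >>> lines = t.pformat(max_width=30)  # list of formatted row strings as built above
# >>> t.pprint(max_lines=10)           # prints the same rows to stdout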
def _more_tabcol(
self,
tabcol,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
):
"""Interactive "more" of a table or column.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
allowed_keys = "f br<>qhpn"
# Count the header lines
n_header = 0
if show_name:
n_header += 1
if show_unit:
n_header += 1
if show_dtype:
n_header += 1
if show_name or show_unit or show_dtype:
n_header += 1
# Set up kwargs for pformat call. Only Table gets max_width.
kwargs = dict(
max_lines=-1,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
if hasattr(tabcol, "columns"): # tabcol is a table
kwargs["max_width"] = max_width
# If max_lines is None (=> query screen size) then increase by 2.
# This is because get_pprint_size leaves 6 extra lines so that in
# ipython you normally see the last input line.
max_lines1, max_width = self._get_pprint_size(max_lines, max_width)
if max_lines is None:
max_lines1 += 2
delta_lines = max_lines1 - n_header
# Set up a function to get a single character on any platform
inkey = Getch()
i0 = 0 # First table/column row to show
showlines = True
while True:
i1 = i0 + delta_lines # Last table/col row to show
if showlines: # Don't always show the table (e.g. after help)
try:
os.system("cls" if os.name == "nt" else "clear")
except Exception:
pass # No worries if clear screen call fails
lines = tabcol[i0:i1].pformat(**kwargs)
colors = (
"red" if i < n_header else "default" for i in range(len(lines))
)
for color, line in zip(colors, lines):
color_print(line, color)
showlines = True
print()
print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=" ")
# Get a valid key
while True:
try:
key = inkey().lower()
except Exception:
print("\n")
log.error(
"Console does not support getting a character"
" as required by more(). Use pprint() instead."
)
return
if key in allowed_keys:
break
print(key)
if key.lower() == "q":
break
elif key == " " or key == "f":
i0 += delta_lines
elif key == "b":
i0 = i0 - delta_lines
elif key == "r":
pass
elif key == "<":
i0 = 0
elif key == ">":
i0 = len(tabcol)
elif key == "p":
i0 -= 1
elif key == "n":
i0 += 1
elif key == "h":
showlines = False
print(
"""
Browsing keys:
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help""",
end=" ",
)
if i0 < 0:
i0 = 0
if i0 >= len(tabcol) - delta_lines:
i0 = len(tabcol) - delta_lines
print("\n")
|
f21f7f2b9251c1c5ccba663f1273ddf0c4b181bbb8d6555994c289c88f49a6eb | """High-level table operations.
- join()
- setdiff()
- hstack()
- vstack()
- dstack()
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
import itertools
from collections import Counter, OrderedDict
from collections.abc import Mapping, Sequence
from copy import deepcopy
import numpy as np
from astropy.units import Quantity
from astropy.utils import metadata
from astropy.utils.masked import Masked
from . import _np_utils
from .np_utils import TableMergeError
from .table import Column, MaskedColumn, QTable, Row, Table
__all__ = [
"join",
"setdiff",
"hstack",
"vstack",
"unique",
"join_skycoord",
"join_distance",
]
__doctest_requires__ = {"join_skycoord": ["scipy"], "join_distance": ["scipy"]}
def _merge_table_meta(out, tables, metadata_conflicts="warn"):
out_meta = deepcopy(tables[0].meta)
for table in tables[1:]:
out_meta = metadata.merge(
out_meta, table.meta, metadata_conflicts=metadata_conflicts
)
out.meta.update(out_meta)
def _get_list_of_tables(tables):
"""
Check that tables is a Table or sequence of Tables. Returns the
corresponding list of Tables.
"""
# Make sure we have a list of things
if not isinstance(tables, Sequence):
tables = [tables]
# Make sure there is something to stack
if len(tables) == 0:
raise ValueError("no values provided to stack.")
# Convert inputs (Table, Row, or anything column-like) to Tables.
# Special case that Quantity converts to a QTable.
for ii, val in enumerate(tables):
if isinstance(val, Table):
pass
elif isinstance(val, Row):
tables[ii] = Table(val)
elif isinstance(val, Quantity):
tables[ii] = QTable([val])
else:
try:
tables[ii] = Table([val])
except (ValueError, TypeError) as err:
raise TypeError(f"Cannot convert {val} to table column.") from err
return tables
def _get_out_class(objs):
"""
From a list of input objects ``objs`` get merged output object class.
This is just taken as the deepest subclass. This doesn't handle complicated
inheritance schemes, but as a special case, classes which share ``info``
are taken to be compatible.
"""
out_class = objs[0].__class__
for obj in objs[1:]:
if issubclass(obj.__class__, out_class):
out_class = obj.__class__
if any(
not (
issubclass(out_class, obj.__class__) or out_class.info is obj.__class__.info
)
for obj in objs
):
raise ValueError(
f"unmergeable object classes {[type(obj).__name__ for obj in objs]}"
)
return out_class
def join_skycoord(distance, distance_func="search_around_sky"):
"""Helper function to join on SkyCoord columns using distance matching.
This function is intended for use in ``table.join()`` to allow performing a
table join where the key columns are both ``SkyCoord`` objects, matched by
computing the distance between points and accepting values below
``distance``.
The distance cross-matching is done using either
`~astropy.coordinates.search_around_sky` or
`~astropy.coordinates.search_around_3d`, depending on the value of
``distance_func``. The default is ``'search_around_sky'``.
One can also provide a function object for ``distance_func``, in which case
it must be a function that follows the same input and output API as
`~astropy.coordinates.search_around_sky`. In this case the function will
be called with ``(skycoord1, skycoord2, distance)`` as arguments.
Parameters
----------
distance : `~astropy.units.Quantity` ['angle', 'length']
Maximum distance between points to be considered a join match.
Must have angular or distance units.
distance_func : str or function
Specifies the function for performing the cross-match based on
``distance``. If supplied as a string this specifies the name of a
function in `astropy.coordinates`. If supplied as a function then that
function is called directly.
Returns
-------
join_func : function
Function that accepts two ``SkyCoord`` columns (col1, col2) and returns
the tuple (ids1, ids2) of pair-matched unique identifiers.
Examples
--------
This example shows an inner join of two ``SkyCoord`` columns, taking any
sources within 0.2 deg to be a match. Note the new ``sc_id`` column which
is added and provides a unique source identifier for the matches.
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> from astropy.table import Table, join_skycoord
>>> from astropy import table
>>> sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg')
>>> sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg')
>>> join_func = join_skycoord(0.2 * u.deg)
>>> join_func(sc1, sc2) # Associate each coordinate with unique source ID
(array([3, 1, 1, 2]), array([4, 1, 2]))
>>> t1 = Table([sc1], names=['sc'])
>>> t2 = Table([sc2], names=['sc'])
>>> t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)})
>>> print(t12) # Note new `sc_id` column with the IDs from join_func()
sc_id sc_1 sc_2
deg,deg deg,deg
----- ------- --------
1 1.0,0.0 1.05,0.0
1 1.1,0.0 1.05,0.0
2 2.0,0.0 2.1,0.0
"""
if isinstance(distance_func, str):
import astropy.coordinates as coords
try:
distance_func = getattr(coords, distance_func)
except AttributeError as err:
raise ValueError(
"distance_func must be a function in astropy.coordinates"
) from err
else:
from inspect import isfunction
if not isfunction(distance_func):
raise ValueError("distance_func must be a str or function")
def join_func(sc1, sc2):
# Call the appropriate SkyCoord method to find pairs within distance
idxs1, idxs2, d2d, d3d = distance_func(sc1, sc2, distance)
# Now convert that into unique identifiers for each near-pair. This is
# taken to be transitive, so that if points 1 and 2 are "near" and points
# 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier.
# This identifier will then be used in the table join matching.
# Identifiers for each column, initialized to all zero.
ids1 = np.zeros(len(sc1), dtype=int)
ids2 = np.zeros(len(sc2), dtype=int)
# Start the identifier count at 1
id_ = 1
for idx1, idx2 in zip(idxs1, idxs2):
# If this col1 point is previously identified then set corresponding
# col2 point to same identifier. Likewise for col2 and col1.
if ids1[idx1] > 0:
ids2[idx2] = ids1[idx1]
elif ids2[idx2] > 0:
ids1[idx1] = ids2[idx2]
else:
# Not yet seen so set identifier for col1 and col2
ids1[idx1] = id_
ids2[idx2] = id_
id_ += 1
# Fill in unique identifiers for points with no near neighbor
for ids in (ids1, ids2):
for idx in np.flatnonzero(ids == 0):
ids[idx] = id_
id_ += 1
# End of enclosure join_func()
return ids1, ids2
return join_func
def join_distance(distance, kdtree_args=None, query_args=None):
"""Helper function to join table columns using distance matching.
This function is intended for use in ``table.join()`` to allow performing
a table join where the key columns are matched by computing the distance
between points and accepting values below ``distance``. This numerical
"fuzzy" match can apply to 1-D or 2-D columns, where in the latter case
the distance is a vector distance.
The distance cross-matching is done using `scipy.spatial.cKDTree`. If
necessary you can tweak the default behavior by providing ``dict`` values
for the ``kdtree_args`` or ``query_args``.
Parameters
----------
distance : float or `~astropy.units.Quantity` ['length']
Maximum distance between points to be considered a join match
kdtree_args : dict, None
Optional extra args for `~scipy.spatial.cKDTree`
query_args : dict, None
Optional extra args for `~scipy.spatial.cKDTree.query_ball_tree`
Returns
-------
join_func : function
Function that accepts two column-like arguments (col1, col2) and returns
the tuple (ids1, ids2) of pair-matched unique identifiers.
Examples
--------
>>> from astropy.table import Table, join_distance
>>> from astropy import table
>>> c1 = [0, 1, 1.1, 2]
>>> c2 = [0.5, 1.05, 2.1]
>>> t1 = Table([c1], names=['col'])
>>> t2 = Table([c2], names=['col'])
>>> t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_distance(0.2)})
>>> print(t12)
col_id col_1 col_2
------ ----- -----
1 1.0 1.05
1 1.1 1.05
2 2.0 2.1
3 0.0 --
4 -- 0.5
"""
try:
from scipy.spatial import cKDTree
except ImportError as exc:
raise ImportError("scipy is required to use join_distance()") from exc
if kdtree_args is None:
kdtree_args = {}
if query_args is None:
query_args = {}
def join_func(col1, col2):
if col1.ndim > 2 or col2.ndim > 2:
raise ValueError("columns for isclose_join must be 1- or 2-dimensional")
if isinstance(distance, Quantity):
# Convert to np.array with common unit
col1 = col1.to_value(distance.unit)
col2 = col2.to_value(distance.unit)
dist = distance.value
else:
# Convert to np.array to allow later in-place shape changing
col1 = np.asarray(col1)
col2 = np.asarray(col2)
dist = distance
# Ensure columns are pure np.array and are 2-D for use with KDTree
if col1.ndim == 1:
col1.shape = col1.shape + (1,)
if col2.ndim == 1:
col2.shape = col2.shape + (1,)
# Cross-match col1 and col2 within dist using KDTree
kd1 = cKDTree(col1, **kdtree_args)
kd2 = cKDTree(col2, **kdtree_args)
nears = kd1.query_ball_tree(kd2, r=dist, **query_args)
# Output of above is nears which is a list of lists, where the outer
# list corresponds to each item in col1, and where the inner lists are
# indexes into col2 of elements within the distance tolerance. This
# identifies col1 / col2 near pairs.
# Now convert that into unique identifiers for each near-pair. This is
# taken to be transitive, so that if points 1 and 2 are "near" and points
# 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier.
# This identifier will then be used in the table join matching.
# Identifiers for each column, initialized to all zero.
ids1 = np.zeros(len(col1), dtype=int)
ids2 = np.zeros(len(col2), dtype=int)
# Start the identifier count at 1
id_ = 1
for idx1, idxs2 in enumerate(nears):
for idx2 in idxs2:
# If this col1 point is previously identified then set corresponding
# col2 point to same identifier. Likewise for col2 and col1.
if ids1[idx1] > 0:
ids2[idx2] = ids1[idx1]
elif ids2[idx2] > 0:
ids1[idx1] = ids2[idx2]
else:
# Not yet seen so set identifier for col1 and col2
ids1[idx1] = id_
ids2[idx2] = id_
id_ += 1
# Fill in unique identifiers for points with no near neighbor
for ids in (ids1, ids2):
for idx in np.flatnonzero(ids == 0):
ids[idx] = id_
id_ += 1
# End of enclosure join_func()
return ids1, ids2
return join_func
def join(
left,
right,
keys=None,
join_type="inner",
*,
keys_left=None,
keys_right=None,
uniq_col_name="{col_name}_{table_name}",
table_names=["1", "2"],
metadata_conflicts="warn",
join_funcs=None,
):
"""
Perform a join of the left table with the right table on specified keys.
Parameters
----------
left : `~astropy.table.Table`-like object
Left side table in the join. If not a Table, will call ``Table(left)``
right : `~astropy.table.Table`-like object
Right side table in the join. If not a Table, will call ``Table(right)``
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
keys_left : str or list of str or list of column-like, optional
Left column(s) used to match rows instead of ``keys`` arg. This can be
a single left table column name or list of column names, or a list of
column-like values with the same length as the left table.
keys_right : str or list of str or list of column-like, optional
Same as ``keys_left``, but for the right side of the join.
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
join_funcs : dict, None
Dict of functions to use for matching the corresponding key column(s).
See `~astropy.table.join_skycoord` for an example and details.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Try converting inputs to Table as needed
if not isinstance(left, Table):
left = Table(left)
if not isinstance(right, Table):
right = Table(right)
col_name_map = OrderedDict()
out = _join(
left,
right,
keys,
join_type,
uniq_col_name,
table_names,
col_name_map,
metadata_conflicts,
join_funcs,
keys_left=keys_left,
keys_right=keys_right,
)
# Merge the column and table meta data. Table subclasses might override
# these methods for custom merge behavior.
_merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts)
return out
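# Illustrative usage sketch (hypothetical tables sharing a 'name' key column;
# printed output omitted):
# >>> from astropy.table import Table, join
# >>> t1 = Table({'name': ['a', 'b'], 'flux': [1.0, 2.0]})
# >>> t2 = Table({'name': ['b', 'c'], 'mag': [20.0, 21.0]})
# >>> join(t1, t2, keys='name')                     # inner join: only the common row 'b'
# >>> join(t1, t2, keys='name', join_type='outer')  # all rows, with gaps masked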
def setdiff(table1, table2, keys=None):
"""
Take a set difference of table rows.
The row set difference will contain all rows in ``table1`` that are not
present in ``table2``. If the keys parameter is not defined, all columns in
``table1`` will be included in the output table.
Parameters
----------
table1 : `~astropy.table.Table`
``table1`` is on the left side of the set difference.
table2 : `~astropy.table.Table`
``table2`` is on the right side of the set difference.
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns in ``table1``.
Returns
-------
diff_table : `~astropy.table.Table`
New table containing the set difference between tables. If the set
difference is empty, an empty table will be returned.
Examples
--------
To get a set difference between two tables::
>>> from astropy.table import setdiff, Table
>>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b'))
>>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 c
4 d
9 f
>>> print(t2)
a b
--- ---
1 c
5 b
9 f
>>> print(setdiff(t1, t2))
a b
--- ---
4 d
>>> print(setdiff(t2, t1))
a b
--- ---
5 b
"""
if keys is None:
keys = table1.colnames
# Check that all keys are in table1 and table2
for tbl, tbl_str in ((table1, "table1"), (table2, "table2")):
diff_keys = np.setdiff1d(keys, tbl.colnames)
if len(diff_keys) != 0:
raise ValueError(
"The {} columns are missing from {}, cannot take "
"a set difference.".format(diff_keys, tbl_str)
)
# Make a light internal copy of both tables
t1 = table1.copy(copy_data=False)
t1.meta = {}
t1.keep_columns(keys)
t1["__index1__"] = np.arange(len(table1)) # Keep track of rows indices
# Make a light internal copy to avoid touching table2
t2 = table2.copy(copy_data=False)
t2.meta = {}
t2.keep_columns(keys)
# Dummy column to recover rows after join
t2["__index2__"] = np.zeros(len(t2), dtype=np.uint8) # dummy column
t12 = _join(t1, t2, join_type="left", keys=keys, metadata_conflicts="silent")
# If t12 index2 is masked then that means some rows were in table1 but not table2.
if hasattr(t12["__index2__"], "mask"):
# Define bool mask of table1 rows not in table2
diff = t12["__index2__"].mask
# Get the row indices of table1 for those rows
idx = t12["__index1__"][diff]
# Select corresponding table1 rows straight from table1 to ensure
# correct table and column types.
t12_diff = table1[idx]
else:
t12_diff = table1[[]]
return t12_diff
def dstack(tables, join_type="outer", metadata_conflicts="warn"):
"""
Stack columns within tables depth-wise.
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof
Table(s) to stack depth-wise with the current table.
Table columns should have the same shape and name for depth-wise stacking
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables depth-wise do::
>>> from astropy.table import dstack, Table
>>> t1 = Table({'a': [1., 2.], 'b': [3., 4.]}, names=('a', 'b'))
>>> t2 = Table({'a': [5., 6.], 'b': [7., 8.]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1.0 3.0
2.0 4.0
>>> print(t2)
a b
--- ---
5.0 7.0
6.0 8.0
>>> print(dstack([t1, t2]))
a b
---------- ----------
1.0 .. 5.0 3.0 .. 7.0
2.0 .. 6.0 4.0 .. 8.0
"""
_check_join_type(join_type, "dstack")
tables = _get_list_of_tables(tables)
if len(tables) == 1:
return tables[0] # no point in stacking a single table
n_rows = {len(table) for table in tables}
if len(n_rows) != 1:
raise ValueError("Table lengths must all match for dstack")
n_row = n_rows.pop()
out = vstack(tables, join_type, metadata_conflicts)
for name, col in out.columns.items():
col = out[name]
# Reshape so each original column is now in a row.
# If entries are not 0-dim then those additional shape dims
# are just carried along.
# [x x x y y y] => [[x x x],
# [y y y]]
new_shape = (len(tables), n_row) + col.shape[1:]
try:
col.shape = (len(tables), n_row) + col.shape[1:]
except AttributeError:
col = col.reshape(new_shape)
# Transpose the table and row axes to get to
# [[x, y],
# [x, y]
# [x, y]]
axes = np.arange(len(col.shape))
axes[:2] = [1, 0]
# This temporarily makes `out` be corrupted (columns of different
# length) but it all works out in the end.
out.columns.__setitem__(name, col.transpose(axes), validated=True)
return out
def vstack(tables, join_type="outer", metadata_conflicts="warn"):
"""
Stack tables vertically (along rows).
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof
Table(s) to stack along rows (vertically) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables along rows do::
>>> from astropy.table import vstack, Table
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
a b
--- ---
5 7
6 8
>>> print(vstack([t1, t2]))
a b
--- ---
1 3
2 4
5 7
6 8
"""
_check_join_type(join_type, "vstack")
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _vstack(tables, join_type, col_name_map, metadata_conflicts)
# Merge table metadata
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def hstack(
tables,
join_type="outer",
uniq_col_name="{col_name}_{table_name}",
table_names=None,
metadata_conflicts="warn",
):
"""
Stack tables along columns (horizontally).
A ``join_type`` of 'exact' means that the tables must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' (default)
means the output will have the union of all rows, with table values being
masked where no common values are available.
Parameters
----------
tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof
Tables to stack along columns (horizontally) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value,
but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
See Also
--------
Table.add_columns, Table.replace_column, Table.update
Examples
--------
To stack two tables horizontally (along columns) do::
>>> from astropy.table import Table, hstack
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
c d
--- ---
5 7
6 8
>>> print(hstack([t1, t2]))
a b c d
--- --- --- ---
1 3 5 7
2 4 6 8
"""
_check_join_type(join_type, "hstack")
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _hstack(tables, join_type, uniq_col_name, table_names, col_name_map)
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def unique(input_table, keys=None, silent=False, keep="first"):
"""
Returns the unique rows of a table.
Parameters
----------
input_table : table-like
keys : str or list of str
Name(s) of column(s) used to create unique rows.
Default is to use all columns.
keep : {'first', 'last', 'none'}
Whether to keep the first or last row for each set of
duplicates. If 'none', all rows that are duplicate are
removed, leaving only rows that are already unique in
the input.
Default is 'first'.
silent : bool
If `True`, masked value column(s) are silently removed from
``keys``. If `False`, an exception is raised when ``keys``
contains masked value column(s).
Default is `False`.
Returns
-------
unique_table : `~astropy.table.Table` object
New table containing only the unique rows of ``input_table``.
Examples
--------
>>> from astropy.table import unique, Table
>>> import numpy as np
>>> table = Table(data=[[1,2,3,2,3,3],
... [2,3,4,5,4,6],
... [3,4,5,6,7,8]],
... names=['col1', 'col2', 'col3'],
... dtype=[np.int32, np.int32, np.int32])
>>> table
<Table length=6>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
2 5 6
3 4 7
3 6 8
>>> unique(table, keys='col1')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
>>> unique(table, keys=['col1'], keep='last')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 5 6
3 6 8
>>> unique(table, keys=['col1', 'col2'])
<Table length=5>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 4 5
3 6 8
>>> unique(table, keys=['col1', 'col2'], keep='none')
<Table length=4>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 6 8
>>> unique(table, keys=['col1'], keep='none')
<Table length=1>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
"""
if keep not in ("first", "last", "none"):
raise ValueError("'keep' should be one of 'first', 'last', 'none'")
if isinstance(keys, str):
keys = [keys]
if keys is None:
keys = input_table.colnames
else:
if len(set(keys)) != len(keys):
raise ValueError("duplicate key names")
# Check for columns with masked values
for key in keys[:]:
col = input_table[key]
if hasattr(col, "mask") and np.any(col.mask):
if not silent:
raise ValueError(
"cannot use columns with masked values as keys; "
"remove column '{}' from keys and rerun "
"unique()".format(key)
)
del keys[keys.index(key)]
if len(keys) == 0:
raise ValueError(
"no column remained in ``keys``; "
"unique() cannot work with masked value "
"key columns"
)
grouped_table = input_table.group_by(keys)
indices = grouped_table.groups.indices
if keep == "first":
indices = indices[:-1]
elif keep == "last":
indices = indices[1:] - 1
else:
indices = indices[:-1][np.diff(indices) == 1]
return grouped_table[indices]
def get_col_name_map(
arrays, common_names, uniq_col_name="{col_name}_{table_name}", table_names=None
):
"""
Find the column names mapping when merging the list of tables
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of the input names
will be present, while for the other non-key columns the value will be (col_name_0,
None, ..) or (None, col_name_1, ..) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.colnames:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.colnames for other in others):
out_name = uniq_col_name.format(
table_name=table_name, col_name=name
)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError(
"Merging column names resulted in duplicates: {}. "
"Change uniq_col_name or table_names args to fix this.".format(
repeated_names
)
)
# Convert col_name_map from a defaultdict to a regular OrderedDict in output column order
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
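# Illustrative sketch (hypothetical tables; 'a' is the shared key and 'x' collides,
# so it is renamed using the default uniq_col_name pattern):
# >>> t1 = Table({'a': [1], 'x': [2]})
# >>> t2 = Table({'a': [1], 'x': [3]})
# >>> dict(get_col_name_map([t1, t2], common_names=['a']))
# {'a': ['a', 'a'], 'x_1': ['x', None], 'x_2': [None, 'x']}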
def get_descrs(arrays, col_name_map):
"""
Find the dtype descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the superset of all dtypes in in_cols
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError(
"The '{}' columns have incompatible types: {}".format(
names[0], tme._incompat_types
)
) from tme
# Make sure all input shapes are the same
uniq_shapes = {col.shape[1:] for col in in_cols}
if len(uniq_shapes) != 1:
raise TableMergeError(f"Key columns {names!r} have different shape")
shape = uniq_shapes.pop()
if out_name is not None:
out_name = str(out_name)
out_descrs.append((out_name, dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
try:
return metadata.common_dtype(cols)
except metadata.MergeConflictError as err:
tme = TableMergeError(f"Columns have incompatible types {err._incompat_types}")
tme._incompat_types = err._incompat_types
raise tme from err
def _get_join_sort_idxs(keys, left, right):
# Go through each of the key columns in order and make columns for
# a new structured array that represents the lexical ordering of those
# key columns. This structured array is then argsort'ed. The trick here
# is that some columns (e.g. Time) may need to be expanded into multiple
# columns for ordering here.
ii = 0 # Index for uniquely naming the sort columns
# sortable_table dtypes as list of (name, dtype_str, shape) tuples
sort_keys_dtypes = []
sort_keys = [] # sortable_table (structured ndarray) column names
sort_left = {} # sortable ndarrays from left table
sort_right = {} # sortable ndarray from right table
for key in keys:
# get_sortable_arrays() returns a list of ndarrays that can be lexically
# sorted to represent the order of the column. In most cases this is just
# a single element of the column itself.
left_sort_cols = left[key].info.get_sortable_arrays()
right_sort_cols = right[key].info.get_sortable_arrays()
if len(left_sort_cols) != len(right_sort_cols):
# Should never happen because cols are screened beforehand for compatibility
raise RuntimeError("mismatch in sort cols lengths")
for left_sort_col, right_sort_col in zip(left_sort_cols, right_sort_cols):
# Check for consistency of shapes. Mismatch should never happen.
shape = left_sort_col.shape[1:]
if shape != right_sort_col.shape[1:]:
raise RuntimeError("mismatch in shape of left vs. right sort array")
if shape != ():
raise ValueError(f"sort key column {key!r} must be 1-d")
sort_key = str(ii)
sort_keys.append(sort_key)
sort_left[sort_key] = left_sort_col
sort_right[sort_key] = right_sort_col
# Build up dtypes for the structured array that gets sorted.
dtype_str = common_dtype([left_sort_col, right_sort_col])
sort_keys_dtypes.append((sort_key, dtype_str))
ii += 1
# Make the empty sortable table and fill it
len_left = len(left)
sortable_table = np.empty(len_left + len(right), dtype=sort_keys_dtypes)
for key in sort_keys:
sortable_table[key][:len_left] = sort_left[key]
sortable_table[key][len_left:] = sort_right[key]
# Finally do the (lexical) argsort and make a new sorted version
idx_sort = sortable_table.argsort(order=sort_keys)
sorted_table = sortable_table[idx_sort]
# Get indexes of unique elements (i.e. the group boundaries)
diffs = np.concatenate(([True], sorted_table[1:] != sorted_table[:-1], [True]))
idxs = np.flatnonzero(diffs)
return idxs, idx_sort
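# Illustrative sketch (hypothetical integer key column 'a'; outputs omitted):
# >>> left = Table({'a': [2, 1]})
# >>> right = Table({'a': [1, 3]})
# >>> idxs, idx_sort = _get_join_sort_idxs(['a'], left, right)
# idx_sort orders the concatenated [left, right] key values lexically, and idxs
# marks the boundaries between groups of equal keys used by the join matching step.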
def _apply_join_funcs(left, right, keys, join_funcs):
"""Apply join_funcs."""
# Make light copies of left and right, then add new index columns.
left = left.copy(copy_data=False)
right = right.copy(copy_data=False)
for key, join_func in join_funcs.items():
ids1, ids2 = join_func(left[key], right[key])
# Define a unique id_key name, and keep adding underscores until we have
# a name not yet present.
id_key = key + "_id"
while id_key in left.columns or id_key in right.columns:
id_key = id_key[:-2] + "_id"
keys = tuple(id_key if orig_key == key else orig_key for orig_key in keys)
left.add_column(ids1, index=0, name=id_key) # [id_key] = ids1
right.add_column(ids2, index=0, name=id_key) # [id_key] = ids2
return left, right, keys
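# Illustrative sketch (not astropy API): a join function in the spirit of
# ``join_skycoord``. A join_func receives the left and right key columns and
# returns two integer id arrays; rows whose ids are equal are treated as a
# match. This hypothetical example matches values that agree after flooring.
def _demo_floor_join_func(col1, col2):
    both = np.floor(np.concatenate([np.asarray(col1, dtype=float),
                                    np.asarray(col2, dtype=float)]))
    _, ids = np.unique(both, return_inverse=True)
    return ids[: len(col1)], ids[len(col1):]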
def _join(
left,
right,
keys=None,
join_type="inner",
uniq_col_name="{col_name}_{table_name}",
table_names=["1", "2"],
col_name_map=None,
metadata_conflicts="warn",
join_funcs=None,
keys_left=None,
keys_right=None,
):
"""
Perform a join of the left and right Tables on specified keys.
Parameters
----------
left : Table
Left side table in the join
right : Table
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
join_funcs : dict, None
Dict of functions to use for matching the corresponding key column(s).
See `~astropy.table.join_skycoord` for an example and details.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Special column name for cartesian join, should never collide with real column
cartesian_index_name = "__table_cartesian_join_temp_index__"
if join_type not in ("inner", "outer", "left", "right", "cartesian"):
raise ValueError(
"The 'join_type' argument should be in 'inner', "
"'outer', 'left', 'right', or 'cartesian' "
"(got '{}' instead)".format(join_type)
)
if join_type == "cartesian":
if keys:
raise ValueError("cannot supply keys for a cartesian join")
if join_funcs:
raise ValueError("cannot supply join_funcs for a cartesian join")
# Make light copies of left and right, then add temporary index columns
# with all the same value so later an outer join turns into a cartesian join.
left = left.copy(copy_data=False)
right = right.copy(copy_data=False)
left[cartesian_index_name] = np.uint8(0)
right[cartesian_index_name] = np.uint8(0)
keys = (cartesian_index_name,)
# Handle the case of join key columns that are different between left and
# right via keys_left/keys_right args. This is done by saving the original
# input tables and making new left and right tables that contain only the
# key cols but with common column names ['0', '1', etc]. This sets `keys` to
# those fake key names in the left and right tables
if keys_left is not None or keys_right is not None:
left_orig = left
right_orig = right
left, right, keys = _join_keys_left_right(
left, right, keys, keys_left, keys_right, join_funcs
)
if keys is None:
keys = tuple(name for name in left.colnames if name in right.colnames)
if len(keys) == 0:
raise TableMergeError("No keys in common between left and right tables")
elif isinstance(keys, str):
# If we have a single key, put it in a tuple
keys = (keys,)
# Check the key columns
for arr, arr_label in ((left, "Left"), (right, "Right")):
for name in keys:
if name not in arr.colnames:
raise TableMergeError(
f"{arr_label} table does not have key column {name!r}"
)
if hasattr(arr[name], "mask") and np.any(arr[name].mask):
raise TableMergeError(
f"{arr_label} key column {name!r} has missing values"
)
if join_funcs is not None:
if not all(key in keys for key in join_funcs):
raise ValueError(
f"join_funcs keys {join_funcs.keys()} must be a "
f"subset of join keys {keys}"
)
left, right, keys = _apply_join_funcs(left, right, keys, join_funcs)
len_left, len_right = len(left), len(right)
if len_left == 0 or len_right == 0:
raise ValueError("input tables for join must both have at least one row")
try:
idxs, idx_sort = _get_join_sort_idxs(keys, left, right)
except NotImplementedError:
raise TypeError("one or more key columns are not sortable")
# Now that we have idxs and idx_sort, revert to the original table args to
# carry on with making the output joined table. `keys` is set to an empty
# list so that all original left and right columns are included in the
# output table.
if keys_left is not None or keys_right is not None:
keys = []
left = left_orig
right = right_orig
# Joined array dtype as a list of descr (name, type_str, shape) tuples
col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names)
out_descrs = get_descrs([left, right], col_name_map)
# Main inner loop in Cython to compute the cartesian product
# indices for the given join type
int_join_type = {"inner": 0, "outer": 1, "left": 2, "right": 3, "cartesian": 1}[
join_type
]
masked, n_out, left_out, left_mask, right_out, right_mask = _np_utils.join_inner(
idxs, idx_sort, len_left, int_join_type
)
out = _get_out_class([left, right])()
for out_name, dtype, shape in out_descrs:
if out_name == cartesian_index_name:
continue
left_name, right_name = col_name_map[out_name]
if left_name and right_name: # this is a key which comes from left and right
cols = [left[left_name], right[right_name]]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, "new_like"):
raise NotImplementedError(
f"join unavailable for mixin column type(s): {col_cls.__name__}"
)
out[out_name] = col_cls.info.new_like(
cols, n_out, metadata_conflicts, out_name
)
out[out_name][:] = np.where(
right_mask,
left[left_name].take(left_out),
right[right_name].take(right_out),
)
continue
elif left_name: # out_name came from the left table
name, array, array_out, array_mask = left_name, left, left_out, left_mask
elif right_name:
name, array, array_out, array_mask = (
right_name,
right,
right_out,
right_mask,
)
else:
raise TableMergeError('Unexpected column names (maybe one is ""?)')
# Select the correct elements from the original table
col = array[name][array_out]
# If the output column is masked then set the output column masking
# accordingly. Check for columns that don't support a mask attribute.
if masked and np.any(array_mask):
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
if isinstance(col, Quantity) and not isinstance(col, Masked):
col = Masked(col, copy=False)
# array_mask is 1-d corresponding to the length of the output column. We need to
# make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..).
# Mixin columns might not have ndim attribute so use len(col.shape).
array_mask.shape = (col.shape[0],) + (1,) * (len(col.shape) - 1)
# Now broadcast to the correct final shape
array_mask = np.broadcast_to(array_mask, col.shape)
try:
col[array_mask] = col.info.mask_val
except Exception as err: # Not clear how different classes will fail here
raise NotImplementedError(
"join requires masking column '{}' but column"
" type {} does not support masking".format(
out_name, col.__class__.__name__
)
) from err
# Set the output table column to the new joined column
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
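# Illustrative usage sketch (not part of astropy): a minimal inner join using the
# private ``_join`` defined above. The table contents here are hypothetical.
def _demo_join():
    t1 = Table({"name": ["a", "b"], "x": [1, 2]})
    t2 = Table({"name": ["b", "c"], "y": [3, 4]})
    # Only the row with name == 'b' appears in both tables, so the inner join
    # returns a single row with columns 'name', 'x' and 'y'.
    return _join(t1, t2, keys="name", join_type="inner")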
def _join_keys_left_right(left, right, keys, keys_left, keys_right, join_funcs):
"""Do processing to handle keys_left / keys_right args for join.
This takes the keys_left/right inputs and turns them into a list of left/right
columns corresponding to those inputs (which can be column names or column
data values). It also generates the list of fake key column names (strings
of "1", "2", etc.) that correspond to the input keys.
"""
def _keys_to_cols(keys, table, label):
# Process input `keys`, which is a str or list of str column names in
# `table` or a list of column-like objects. The `label` is just for
# error reporting.
if isinstance(keys, str):
keys = [keys]
cols = []
for key in keys:
if isinstance(key, str):
try:
cols.append(table[key])
except KeyError:
raise ValueError(f"{label} table does not have key column {key!r}")
else:
if len(key) != len(table):
raise ValueError(
f"{label} table has different length from key {key}"
)
cols.append(key)
return cols
if join_funcs is not None:
raise ValueError("cannot supply join_funcs arg and keys_left / keys_right")
if keys_left is None or keys_right is None:
raise ValueError("keys_left and keys_right must both be provided")
if keys is not None:
raise ValueError(
"keys arg must be None if keys_left and keys_right are supplied"
)
cols_left = _keys_to_cols(keys_left, left, "left")
cols_right = _keys_to_cols(keys_right, right, "right")
if len(cols_left) != len(cols_right):
raise ValueError("keys_left and keys_right args must have same length")
# Make two new temp tables for the join with only the join columns and
# key columns in common.
keys = [f"{ii}" for ii in range(len(cols_left))]
left = left.__class__(cols_left, names=keys, copy=False)
right = right.__class__(cols_right, names=keys, copy=False)
return left, right, keys
def _check_join_type(join_type, func_name):
"""Check join_type arg in hstack and vstack.
This specifically checks for the common mistake of calling vstack(t1, t2)
instead of vstack([t1, t2]). The subsequent check of
``join_type in ('inner', ..)`` does not raise in this case.
"""
if not isinstance(join_type, str):
msg = "`join_type` arg must be a string"
if isinstance(join_type, Table):
msg += (
". Did you accidentally "
f"call {func_name}(t1, t2, ..) instead of "
f"{func_name}([t1, t2], ..)?"
)
raise TypeError(msg)
if join_type not in ("inner", "exact", "outer"):
raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'")
def _vstack(arrays, join_type="outer", col_name_map=None, metadata_conflicts="warn"):
"""
Stack Tables vertically (by rows).
A ``join_type`` of 'exact' means that the arrays must all
have exactly the same column names (though the order can vary). If
``join_type`` is 'inner' then the intersection of common columns will
be the output. A value of 'outer' (the default) means the output will have
the union of all columns, with array values being masked where no common
values are available.
Parameters
----------
arrays : list of Tables
Tables to stack by rows (vertically)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Trivial case of one input array
if len(arrays) == 1:
return arrays[0]
# Start by assuming an outer match where all names go to output
names = set(itertools.chain(*[arr.colnames for arr in arrays]))
col_name_map = get_col_name_map(arrays, names)
# If join_type is 'exact' then the output must have exactly the same
# number of columns as each input array
if join_type == "exact":
for names in col_name_map.values():
if any(x is None for x in names):
raise TableMergeError(
"Inconsistent columns in input arrays "
"(use 'inner' or 'outer' join_type to "
"allow non-matching columns)"
)
join_type = "outer"
# For an inner join, keep only columns where all input arrays have that column
if join_type == "inner":
col_name_map = OrderedDict(
(name, in_names)
for name, in_names in col_name_map.items()
if all(x is not None for x in in_names)
)
if len(col_name_map) == 0:
raise TableMergeError("Input arrays have no columns in common")
lens = [len(arr) for arr in arrays]
n_rows = sum(lens)
out = _get_out_class(arrays)()
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, "new_like"):
raise NotImplementedError(
f"vstack unavailable for mixin column type(s): {col_cls.__name__}"
)
try:
col = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name)
except metadata.MergeConflictError as err:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError(
"The '{}' columns have incompatible types: {}".format(
out_name, err._incompat_types
)
) from err
idx0 = 0
for name, array in zip(in_names, arrays):
idx1 = idx0 + len(array)
if name in array.colnames:
col[idx0:idx1] = array[name]
else:
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
if isinstance(col, Quantity) and not isinstance(col, Masked):
col = Masked(col, copy=False)
try:
col[idx0:idx1] = col.info.mask_val
except Exception as err:
raise NotImplementedError(
"vstack requires masking column '{}' but column"
" type {} does not support masking".format(
out_name, col.__class__.__name__
)
) from err
idx0 = idx1
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
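# Illustrative usage sketch (not part of astropy): with join_type='outer' (the
# default), columns missing from one input are masked in the stacked output.
# The table contents here are hypothetical.
def _demo_vstack():
    t1 = Table({"a": [1, 2], "b": [3.0, 4.0]})
    t2 = Table({"a": [5, 6]})
    # Result has columns 'a' and 'b'; the 'b' entries for t2's rows are masked.
    return _vstack([t1, t2], join_type="outer")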
def _hstack(
arrays,
join_type="outer",
uniq_col_name="{col_name}_{table_name}",
table_names=None,
col_name_map=None,
):
"""
Stack tables horizontally (by columns).
A ``join_type`` of 'exact' means that the arrays must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' (the
default) means the output will have the union of all rows, with array
values being masked where no common values are available.
Parameters
----------
arrays : List of tables
Tables to stack by columns (horizontally)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
if table_names is None:
table_names = [f"{ii + 1}" for ii in range(len(arrays))]
if len(arrays) != len(table_names):
raise ValueError("Number of arrays must match number of table_names")
# Trivial case of one input array
if len(arrays) == 1:
return arrays[0]
col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)
# If join_type is 'exact' then all input arrays must have the same length
arr_lens = [len(arr) for arr in arrays]
if join_type == "exact":
if len(set(arr_lens)) > 1:
raise TableMergeError(
"Inconsistent number of rows in input arrays "
"(use 'inner' or 'outer' join_type to allow "
"non-matching rows)"
)
join_type = "outer"
# For an inner join, keep only the common rows
if join_type == "inner":
min_arr_len = min(arr_lens)
if len(set(arr_lens)) > 1:
arrays = [arr[:min_arr_len] for arr in arrays]
arr_lens = [min_arr_len for arr in arrays]
# If there are any output rows where one or more input arrays are missing
# then the output must be masked. If any input arrays are masked then
# output is masked.
n_rows = max(arr_lens)
out = _get_out_class(arrays)()
for out_name, in_names in col_name_map.items():
for name, array, arr_len in zip(in_names, arrays, arr_lens):
if name is None:
continue
if n_rows > arr_len:
indices = np.arange(n_rows)
indices[arr_len:] = 0
col = array[name][indices]
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
if isinstance(col, Quantity) and not isinstance(col, Masked):
col = Masked(col, copy=False)
try:
col[arr_len:] = col.info.mask_val
except Exception as err:
raise NotImplementedError(
"hstack requires masking column '{}' but column"
" type {} does not support masking".format(
out_name, col.__class__.__name__
)
) from err
else:
col = array[name][:n_rows]
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
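# Illustrative usage sketch (not part of astropy): horizontal stacking of tables
# with unequal lengths under join_type='outer' masks the missing rows. The table
# contents here are hypothetical.
def _demo_hstack():
    t1 = Table({"a": [1, 2, 3]})
    t2 = Table({"b": [10.0, 20.0]})
    # Result has 3 rows; the third entry of column 'b' is masked.
    return _hstack([t1, t2], join_type="outer")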
|
1f61d8e322f6fe931542063af654c6a20dc06d9a1550fbada440918a6282d6b9 | """
High-level operations for numpy structured arrays.
Some code and inspiration taken from numpy.lib.recfunctions.join_by().
Redistribution license restrictions apply.
"""
import collections
from collections import Counter, OrderedDict
from collections.abc import Sequence
import numpy as np
__all__ = ["TableMergeError"]
class TableMergeError(ValueError):
pass
def get_col_name_map(
arrays, common_names, uniq_col_name="{col_name}_{table_name}", table_names=None
):
"""
Find the column names mapping when merging the list of structured ndarrays
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of the input names
will be present, while for the other non-key columns the value will be (col_name_0,
None, ..) or (None, col_name_1, ..) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.dtype.names:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.dtype.names for other in others):
out_name = uniq_col_name.format(
table_name=table_name, col_name=name
)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError(
"Merging column names resulted in duplicates: {}. "
"Change uniq_col_name or table_names args to fix this.".format(
repeated_names
)
)
# Convert col_name_map to a regular dict with tuple (immutable) values
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
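# Illustrative sketch (not part of astropy): how conflicting field names are
# renamed when merging structured arrays. The field names and data are hypothetical.
def _demo_col_name_map():
    left = np.array([(1, 2.0)], dtype=[("key", "i8"), ("x", "f8")])
    right = np.array([(1, 3.0)], dtype=[("key", "i8"), ("x", "f8")])
    # 'key' is common so it maps to both inputs; the colliding 'x' columns are
    # renamed to 'x_1' and 'x_2' via the default uniq_col_name template.
    return get_col_name_map([left, right], common_names=["key"])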
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the superset of all dtypes in in_arrays
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError(
"The '{}' columns have incompatible types: {}".format(
names[0], tme._incompat_types
)
) from tme
# Make sure all input shapes are the same
uniq_shapes = {col.shape[1:] for col in in_cols}
if len(uniq_shapes) != 1:
raise TableMergeError("Key columns have different shape")
shape = uniq_shapes.pop()
if out_name is not None:
out_name = str(out_name)
out_descrs.append((out_name, dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of structured ndarray columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = {
tuple(issubclass(col.dtype.type, np_type) for np_type in np_types)
for col in cols
}
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [col.dtype.name for col in cols]
tme = TableMergeError(f"Columns have incompatible types {incompat_types}")
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=col.dtype) for col in cols]
# For string-type arrays we need to explicitly fill in non-zero
# values, or the final arr_common = .. step is unpredictable.
for arr in arrs:
if arr.dtype.kind in ("S", "U"):
arr[0] = "0" * arr.itemsize
arr_common = np.array([arr[0] for arr in arrs])
return arr_common.dtype.str
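# Illustrative sketch (not part of astropy): dtype promotion via ``common_dtype``
# on hypothetical plain ndarrays (e.g. fields extracted from structured arrays).
def _demo_common_dtype():
    int_col = np.array([1, 2, 3])
    float_col = np.array([1.5])
    # Both are np.number subclasses, so they promote to a float dtype string such
    # as '<f8'; mixing, say, integers with strings would raise TableMergeError.
    return common_dtype([int_col, float_col])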
def _check_for_sequence_of_structured_arrays(arrays):
err = "`arrays` arg must be a sequence (e.g. list) of structured arrays"
if not isinstance(arrays, Sequence):
raise TypeError(err)
for array in arrays:
# Must be structured array
if not isinstance(array, np.ndarray) or array.dtype.names is None:
raise TypeError(err)
if len(arrays) == 0:
raise ValueError("`arrays` arg must include at least one array")
|
9675a95e63d7df383a1103cdc41b3c41cd403fb235902680f584c1a88e2d263d | import copy
import json
import textwrap
from collections import OrderedDict
import numpy as np
import yaml
__all__ = ["get_header_from_yaml", "get_yaml_from_header", "get_yaml_from_table"]
class ColumnOrderList(list):
"""
List of tuples that sorts in a specific order that makes sense for
astropy table column attributes.
"""
def sort(self, *args, **kwargs):
super().sort()
column_keys = ["name", "unit", "datatype", "format", "description", "meta"]
in_dict = dict(self)
out_list = []
for key in column_keys:
if key in in_dict:
out_list.append((key, in_dict[key]))
for key, val in self:
if key not in column_keys:
out_list.append((key, val))
# Clear list in-place
del self[:]
self.extend(out_list)
class ColumnDict(dict):
"""
Specialized dict subclass to represent attributes of a Column
and return items() in a preferred order. This is only for use
in generating a YAML map representation that has a fixed order.
"""
def items(self):
"""
Return items as a ColumnOrderList, which sorts in the preferred
way for column attributes.
"""
return ColumnOrderList(super().items())
def _construct_odict(load, node):
"""
Construct OrderedDict from !!omap in yaml safe load.
Source: https://gist.github.com/weaver/317164
License: Unspecified
This is the same as SafeConstructor.construct_yaml_omap(),
except the data type is changed to OrderedDict() and setitem is
used instead of append in the loop
Examples
--------
::
>>> yaml.load(''' # doctest: +SKIP
... !!omap
... - foo: bar
... - mumble: quux
... - baz: gorp
... ''')
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''') # doctest: +SKIP
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
"""
omap = OrderedDict()
yield omap
if not isinstance(node, yaml.SequenceNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map",
node.start_mark,
f"expected a sequence, but found {node.id}",
node.start_mark,
)
for subnode in node.value:
if not isinstance(subnode, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map",
node.start_mark,
f"expected a mapping of length 1, but found {subnode.id}",
subnode.start_mark,
)
if len(subnode.value) != 1:
raise yaml.constructor.ConstructorError(
"while constructing an ordered map",
node.start_mark,
f"expected a single mapping item, but found {len(subnode.value)} items",
subnode.start_mark,
)
key_node, value_node = subnode.value[0]
key = load.construct_object(key_node)
value = load.construct_object(value_node)
omap[key] = value
def _repr_pairs(dump, tag, sequence, flow_style=None):
"""
This is the same code as BaseRepresenter.represent_sequence(),
but the value passed to dump.represent_data() in the loop is a
dictionary instead of a tuple.
Source: https://gist.github.com/weaver/317164
License: Unspecified
"""
value = []
node = yaml.SequenceNode(tag, value, flow_style=flow_style)
if dump.alias_key is not None:
dump.represented_objects[dump.alias_key] = node
best_style = True
for key, val in sequence:
item = dump.represent_data({key: val})
if not (isinstance(item, yaml.ScalarNode) and not item.style):
best_style = False
value.append(item)
if flow_style is None:
if dump.default_flow_style is not None:
node.flow_style = dump.default_flow_style
else:
node.flow_style = best_style
return node
def _repr_odict(dumper, data):
"""
Represent OrderedDict in yaml dump.
Source: https://gist.github.com/weaver/317164
License: Unspecified
>>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP
'!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n'
>>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP
'!!omap [foo: bar, mumble: quux, baz: gorp]\\n'
"""
return _repr_pairs(dumper, "tag:yaml.org,2002:omap", data.items())
def _repr_column_dict(dumper, data):
"""
Represent ColumnDict in yaml dump.
This is the same as an ordinary mapping except that the keys
are written in a fixed order that makes sense for astropy table
columns.
"""
return dumper.represent_mapping("tag:yaml.org,2002:map", data)
def _get_variable_length_array_shape(col):
"""Check if object-type ``col`` is really a variable length list.
That is true if the object consists purely of lists of nested lists, where
the shape of every item can be represented as (m, n, ..., *), with the (m,
n, ...) constant and only the lists in the last axis having variable
length. If so, the returned value of ``shape`` will be a tuple of the form
(m, n, ..., None).
If ``col`` is a variable length array then the return ``dtype`` corresponds
to the type found by numpy for all the individual values. Otherwise it will
be ``np.dtype(object)``.
Parameters
----------
col : column-like
Input table column, assumed to be object-type
Returns
-------
shape : tuple
Inferred variable length shape, with None for the variable axis, or () if
``col`` is not a variable-length array
dtype : np.dtype
Numpy dtype that applies to col
"""
class ConvertError(ValueError):
"""Local conversion error used below."""
# Numpy types supported as variable-length arrays
np_classes = (np.floating, np.integer, np.bool_, np.unicode_)
try:
if len(col) == 0 or not all(isinstance(val, np.ndarray) for val in col):
raise ConvertError
dtype = col[0].dtype
shape = col[0].shape[:-1]
for val in col:
if not issubclass(val.dtype.type, np_classes) or val.shape[:-1] != shape:
raise ConvertError
dtype = np.promote_types(dtype, val.dtype)
shape = shape + (None,)
except ConvertError:
# `col` is not a variable length array, so revert shape and dtype to
# the original values. Note that this function is only called if
# col.shape[1:] was () and col.info.dtype is object.
dtype = col.info.dtype
shape = ()
return shape, dtype
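# Illustrative sketch (not part of astropy): detecting a variable-length object
# column. The column here is a hypothetical plain object-dtype ndarray.
def _demo_variable_length_shape():
    col = np.empty(2, dtype=object)
    col[0] = np.array([1.0, 2.0, 3.0])
    col[1] = np.array([4.0])
    # Every element is a 1-d float ndarray, so the inferred shape is (None,) and
    # the dtype is the promoted float64.
    return _get_variable_length_array_shape(col)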
def _get_datatype_from_dtype(dtype):
"""Return string version of ``dtype`` for writing to ECSV ``datatype``."""
datatype = dtype.name
if datatype.startswith(("bytes", "str")):
datatype = "string"
if datatype.endswith("_"):
datatype = datatype[:-1] # string_ and bool_ lose the final _ for ECSV
return datatype
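# Illustrative sketch (not part of astropy): a couple of hypothetical inputs to
# the dtype-to-ECSV-datatype mapping above.
def _demo_datatype_names():
    # Numeric dtypes pass through by name (e.g. 'float64'); unicode or bytes
    # dtypes such as '<U5' collapse to the single ECSV 'string' datatype.
    return (_get_datatype_from_dtype(np.dtype("float64")),
            _get_datatype_from_dtype(np.dtype("U5")))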
def _get_col_attributes(col):
"""
Extract information from a column (apart from the values) that is required
to fully serialize the column.
Parameters
----------
col : column-like
Input Table column
Returns
-------
attrs : dict
Dict of ECSV attributes for ``col``
"""
dtype = col.info.dtype # Type of column values that get written
subtype = None # Type of data for object columns serialized with JSON
shape = col.shape[1:] # Shape of multidim / variable length columns
if dtype.name == "object":
if shape == ():
# 1-d object type column might be a variable length array
dtype = np.dtype(str)
shape, subtype = _get_variable_length_array_shape(col)
else:
# N-d object column is subtype object but serialized as JSON string
dtype = np.dtype(str)
subtype = np.dtype(object)
elif shape:
# N-d column which is not object is serialized as JSON string
dtype = np.dtype(str)
subtype = col.info.dtype
datatype = _get_datatype_from_dtype(dtype)
# Set the output attributes
attrs = ColumnDict()
attrs["name"] = col.info.name
attrs["datatype"] = datatype
for attr, nontrivial, xform in (
("unit", lambda x: x is not None, str),
("format", lambda x: x is not None, None),
("description", lambda x: x is not None, None),
("meta", lambda x: x, None),
):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
attrs[attr] = xform(col_attr) if xform else col_attr
if subtype:
attrs["subtype"] = _get_datatype_from_dtype(subtype)
# Numpy 'object' maps to 'subtype' of 'json' in ECSV
if attrs["subtype"] == "object":
attrs["subtype"] = "json"
if shape:
attrs["subtype"] += json.dumps(list(shape), separators=(",", ":"))
return attrs
def get_yaml_from_table(table):
"""
Return lines with a YAML representation of header content from the ``table``.
Parameters
----------
table : `~astropy.table.Table` object
Table for which header content is output
Returns
-------
lines : list
List of text lines with YAML header content
"""
header = {"cols": list(table.columns.values())}
if table.meta:
header["meta"] = table.meta
return get_yaml_from_header(header)
def get_yaml_from_header(header):
"""
Return lines with a YAML representation of header content from a Table.
The ``header`` dict must contain these keys:
- 'cols' : list of table column objects (required)
- 'meta' : table 'meta' attribute (optional)
Other keys included in ``header`` will be serialized in the output YAML
representation.
Parameters
----------
header : dict
Table header content
Returns
-------
lines : list
List of text lines with YAML header content
"""
from astropy.io.misc.yaml import AstropyDumper
class TableDumper(AstropyDumper):
"""
Custom Dumper that represents OrderedDict as an !!omap object.
"""
def represent_mapping(self, tag, mapping, flow_style=None):
"""
This is a combination of the Python 2 and 3 versions of this method
in the PyYAML library to allow the required key ordering via the
ColumnOrderList object. The Python 3 version insists on turning the
items() mapping into a list object and sorting, which results in
alphabetical order for the column keys.
"""
value = []
node = yaml.MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, "items"):
mapping = mapping.items()
if hasattr(mapping, "sort"):
mapping.sort()
else:
mapping = list(mapping)
try:
mapping = sorted(mapping)
except TypeError:
pass
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style):
best_style = False
if not (
isinstance(node_value, yaml.ScalarNode) and not node_value.style
):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
TableDumper.add_representer(OrderedDict, _repr_odict)
TableDumper.add_representer(ColumnDict, _repr_column_dict)
header = copy.copy(header) # Don't overwrite original
header["datatype"] = [_get_col_attributes(col) for col in header["cols"]]
del header["cols"]
lines = yaml.dump(
header, default_flow_style=None, Dumper=TableDumper, width=130
).splitlines()
return lines
class YamlParseError(Exception):
pass
def get_header_from_yaml(lines):
"""
Get a header dict from input ``lines`` which should be valid YAML. This
input will typically be created by get_yaml_from_header. The output is a
dictionary which describes all the table and column meta.
The get_cols() method in the io/ascii/ecsv.py file should be used as a
guide to using the information when constructing a table using this
header dict information.
Parameters
----------
lines : list
List of text lines with YAML header content
Returns
-------
header : dict
Dictionary describing table and column meta
"""
from astropy.io.misc.yaml import AstropyLoader
class TableLoader(AstropyLoader):
"""
Custom Loader that constructs OrderedDict from an !!omap object.
This does nothing but provide a namespace for adding the
custom odict constructor.
"""
TableLoader.add_constructor("tag:yaml.org,2002:omap", _construct_odict)
# Now actually load the YAML data structure into `meta`
header_yaml = textwrap.dedent("\n".join(lines))
try:
header = yaml.load(header_yaml, Loader=TableLoader)
except Exception as err:
raise YamlParseError() from err
return header
|
7ddf38407f2b485f2673d4b5c350a1399b1406c97e6a95b0a6a5303469b5c44c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
def _searchsorted(array, val, side="left"):
"""
Call np.searchsorted or use a custom binary
search if necessary.
"""
if hasattr(array, "searchsorted"):
return array.searchsorted(val, side=side)
# Python binary search
begin = 0
end = len(array)
while begin < end:
mid = (begin + end) // 2
if val > array[mid]:
begin = mid + 1
elif val < array[mid]:
end = mid
elif side == "right":
begin = mid + 1
else:
end = mid
return begin
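# Illustrative sketch (not part of astropy): the pure-Python fallback above is
# used for containers without a ``searchsorted`` method, e.g. a plain list.
# The values are hypothetical.
def _demo_searchsorted_fallback():
    data = [1, 3, 3, 7]
    left = _searchsorted(data, 3, side="left")    # 1: first index with value >= 3
    right = _searchsorted(data, 3, side="right")  # 3: first index with value > 3
    return left, right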
class SortedArray:
"""
Implements a sorted array container using
a list of numpy arrays.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool
Whether the values of the index must be unique.
Defaults to False.
"""
def __init__(self, data, row_index, unique=False):
self.data = data
self.row_index = row_index
self.num_cols = len(getattr(data, "colnames", []))
self.unique = unique
@property
def cols(self):
return list(self.data.columns.values())
def add(self, key, row):
"""
Add a new entry to the sorted array.
Parameters
----------
key : tuple
Column values at the given row
row : int
Row number
"""
pos = self.find_pos(key, row) # first >= key
if (
self.unique
and 0 <= pos < len(self.row_index)
and all(self.data[pos][i] == key[i] for i in range(len(key)))
):
# already exists
raise ValueError(f'Cannot add duplicate value "{key}" in a unique index')
self.data.insert_row(pos, key)
self.row_index = self.row_index.insert(pos, row)
def _get_key_slice(self, i, begin, end):
"""
Retrieve the ith slice of the sorted array
from begin to end.
"""
if i < self.num_cols:
return self.cols[i][begin:end]
else:
return self.row_index[begin:end]
def find_pos(self, key, data, exact=False):
"""
Return the index of the first entry in the index whose (key, row)
value is greater than or equal to the given (key, data) pair.
Parameters
----------
key : tuple
Column key
data : int
Row number
exact : bool
If True, return the index of the given key in data
or -1 if the key is not present.
"""
begin = 0
end = len(self.row_index)
num_cols = self.num_cols
if not self.unique:
# consider the row value as well
key = key + (data,)
num_cols += 1
# search through keys in lexicographic order
for i in range(num_cols):
key_slice = self._get_key_slice(i, begin, end)
t = _searchsorted(key_slice, key[i])
# t is the smallest index >= key[i]
if exact and (t == len(key_slice) or key_slice[t] != key[i]):
# no match
return -1
elif t == len(key_slice) or (
t == 0 and len(key_slice) > 0 and key[i] < key_slice[0]
):
# too small or too large
return begin + t
end = begin + _searchsorted(key_slice, key[i], side="right")
begin += t
if begin >= len(self.row_index): # greater than all keys
return begin
return begin
def find(self, key):
"""
Find all rows matching the given key.
Parameters
----------
key : tuple
Column values
Returns
-------
matching_rows : list
List of rows matching the input key
"""
begin = 0
end = len(self.row_index)
# search through keys in lexicographic order
for i in range(self.num_cols):
key_slice = self._get_key_slice(i, begin, end)
t = _searchsorted(key_slice, key[i])
# t is the smallest index >= key[i]
if t == len(key_slice) or key_slice[t] != key[i]:
# no match
return []
elif t == 0 and len(key_slice) > 0 and key[i] < key_slice[0]:
# too small or too large
return []
end = begin + _searchsorted(key_slice, key[i], side="right")
begin += t
if begin >= len(self.row_index): # greater than all keys
return []
return self.row_index[begin:end]
def range(self, lower, upper, bounds):
"""
Find values in the given range.
Parameters
----------
lower : tuple
Lower search bound
upper : tuple
Upper search bound
bounds : (2,) tuple of bool
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument corresponds to an inclusive lower bound,
and the second argument to an inclusive upper bound.
"""
lower_pos = self.find_pos(lower, 0)
upper_pos = self.find_pos(upper, 0)
if lower_pos == len(self.row_index):
return []
lower_bound = tuple(col[lower_pos] for col in self.cols)
if not bounds[0] and lower_bound == lower:
lower_pos += 1 # data[lower_pos] > lower
# data[lower_pos] >= lower
# data[upper_pos] >= upper
if upper_pos < len(self.row_index):
upper_bound = tuple(col[upper_pos] for col in self.cols)
if not bounds[1] and upper_bound == upper:
upper_pos -= 1 # data[upper_pos] < upper
elif upper_bound > upper:
upper_pos -= 1 # data[upper_pos] <= upper
return self.row_index[lower_pos : upper_pos + 1]
def remove(self, key, data):
"""
Remove the given entry from the sorted array.
Parameters
----------
key : tuple
Column values
data : int
Row number
Returns
-------
successful : bool
Whether the entry was successfully removed
"""
pos = self.find_pos(key, data, exact=True)
if pos == -1: # key not found
return False
self.data.remove_row(pos)
keep_mask = np.ones(len(self.row_index), dtype=bool)
keep_mask[pos] = False
self.row_index = self.row_index[keep_mask]
return True
def shift_left(self, row):
"""
Decrement all row numbers greater than the input row.
Parameters
----------
row : int
Input row number
"""
self.row_index[self.row_index > row] -= 1
def shift_right(self, row):
"""
Increment all row numbers greater than or equal to the input row.
Parameters
----------
row : int
Input row number
"""
self.row_index[self.row_index >= row] += 1
def replace_rows(self, row_map):
"""
Replace all rows with the values they map to in the
given dictionary. Any rows not present as keys in
the dictionary will have their entries deleted.
Parameters
----------
row_map : dict
Mapping of row numbers to new row numbers
"""
num_rows = len(row_map)
keep_rows = np.zeros(len(self.row_index), dtype=bool)
tagged = 0
for i, row in enumerate(self.row_index):
if row in row_map:
keep_rows[i] = True
tagged += 1
if tagged == num_rows:
break
self.data = self.data[keep_rows]
self.row_index = np.array([row_map[x] for x in self.row_index[keep_rows]])
def items(self):
"""
Retrieve all array items as a list of pairs of the form
[(key, [row 1, row 2, ...]), ...].
"""
array = []
last_key = None
for i, key in enumerate(zip(*self.data.columns.values())):
row = self.row_index[i]
if key == last_key:
array[-1][1].append(row)
else:
last_key = key
array.append((key, [row]))
return array
def sort(self):
"""
Make row order align with key order.
"""
self.row_index = np.arange(len(self.row_index))
def sorted_data(self):
"""
Return rows in sorted order.
"""
return self.row_index
def __getitem__(self, item):
"""
Return a sliced reference to this sorted array.
Parameters
----------
item : slice
Slice to use for referencing
"""
return SortedArray(self.data[item], self.row_index[item])
def __repr__(self):
t = self.data.copy()
t["rows"] = self.row_index
return f"<{self.__class__.__name__} length={len(t)}>\n{t}"
|
b625905169ac7955e3be8c8c8dd306710c58251ef5d19bc5ffa1132d73c995ef | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for table development, mostly creating useful
tables for testing.
"""
import string
from itertools import cycle
import numpy as np
from astropy.utils.data_info import ParentDtypeInfo
from .table import Column, Table
class TimingTables:
"""
Object which contains two tables and various other attributes that
are useful for timing and other API tests.
"""
def __init__(self, size=1000, masked=False):
self.masked = masked
# Initialize table
self.table = Table(masked=self.masked)
# Create column with mixed types
np.random.seed(12345)
self.table["i"] = np.arange(size)
self.table["a"] = np.random.random(size) # float
self.table["b"] = np.random.random(size) > 0.5 # bool
self.table["c"] = np.random.random((size, 10)) # 2d column
self.table["d"] = np.random.choice(np.array(list(string.ascii_letters)), size)
self.extra_row = {"a": 1.2, "b": True, "c": np.repeat(1, 10), "d": "Z"}
self.extra_column = np.random.randint(0, 100, size)
self.row_indices = np.where(self.table["a"] > 0.9)[0]
self.table_grouped = self.table.group_by("d")
# Another table for testing joining
self.other_table = Table(masked=self.masked)
self.other_table["i"] = np.arange(1, size, 3)
self.other_table["f"] = np.random.random()
self.other_table.sort("f")
# Another table for testing hstack
self.other_table_2 = Table(masked=self.masked)
self.other_table_2["g"] = np.random.random(size)
self.other_table_2["h"] = np.random.random((size, 10))
self.bool_mask = self.table["a"] > 0.6
def simple_table(size=3, cols=None, kinds="ifS", masked=False):
"""
Return a simple table for testing.
Example
--------
::
>>> from astropy.table.table_helpers import simple_table
>>> print(simple_table(3, 6, masked=True, kinds='ifOS'))
a b c d e f
--- --- -------- --- --- ---
-- 1.0 {'c': 2} -- 5 5.0
2 2.0 -- e 6 --
3 -- {'e': 4} f -- 7.0
Parameters
----------
size : int
Number of table rows
cols : int, optional
Number of table columns. Defaults to number of kinds.
kinds : str
String consisting of the column dtype.kinds. This string
will be cycled through to generate the column dtype.
The allowed values are 'i', 'f', 'S', 'O'.
Returns
-------
out : `Table`
New table with appropriate characteristics
"""
if cols is None:
cols = len(kinds)
if cols > 26:
raise ValueError("Max 26 columns in SimpleTable")
columns = []
names = [chr(ord("a") + ii) for ii in range(cols)]
letters = np.array([c for c in string.ascii_letters])
for jj, kind in zip(range(cols), cycle(kinds)):
if kind == "i":
data = np.arange(1, size + 1, dtype=np.int64) + jj
elif kind == "f":
data = np.arange(size, dtype=np.float64) + jj
elif kind == "S":
indices = (np.arange(size) + jj) % len(letters)
data = letters[indices]
elif kind == "O":
indices = (np.arange(size) + jj) % len(letters)
vals = letters[indices]
data = [{val: index} for val, index in zip(vals, indices)]
else:
raise ValueError("Unknown data kind")
columns.append(Column(data))
table = Table(columns, names=names, masked=masked)
if masked:
for ii, col in enumerate(table.columns.values()):
mask = np.array((np.arange(size) + ii) % 3, dtype=bool)
col.mask = ~mask
return table
def complex_table():
"""
Return a masked table from the io.votable test set that has a wide variety
of stressing types.
"""
import warnings
from astropy.io.votable.table import parse
from astropy.utils.data import get_pkg_data_filename
with warnings.catch_warnings():
warnings.simplefilter("ignore")
votable = parse(
get_pkg_data_filename("../io/votable/tests/data/regression.xml"),
pedantic=False,
)
first_table = votable.get_first_table()
table = first_table.to_table()
return table
class ArrayWrapperInfo(ParentDtypeInfo):
_represent_as_dict_primary_data = "data"
def _represent_as_dict(self):
"""Represent Column as a dict that can be serialized."""
col = self._parent
out = {"data": col.data}
return out
def _construct_from_dict(self, map):
"""Construct Column from ``map``."""
data = map.pop("data")
out = self._parent_cls(data, **map)
return out
class ArrayWrapper:
"""
Minimal mixin using a simple wrapper around a numpy array.
TODO: think about the future of this class as it is mostly for demonstration
purposes (of the mixin protocol). Consider taking it out of core and putting
it into a tutorial. One advantage of having this in core is that it is
getting tested in the mixin testing though it doesn't work for multidim
data.
"""
info = ArrayWrapperInfo()
def __init__(self, data, copy=True):
self.data = np.array(data, copy=copy)
if "info" in getattr(data, "__dict__", ()):
self.info = data.info
def __getitem__(self, item):
if isinstance(item, (int, np.integer)):
out = self.data[item]
else:
out = self.__class__(self.data[item], copy=False)
if "info" in self.__dict__:
out.info = self.info
return out
def __setitem__(self, item, value):
self.data[item] = value
def __len__(self):
return len(self.data)
def __eq__(self, other):
"""Minimal equality testing, mostly for mixin unit tests."""
if isinstance(other, ArrayWrapper):
return self.data == other.data
else:
return self.data == other
@property
def dtype(self):
return self.data.dtype
@property
def shape(self):
return self.data.shape
def __repr__(self):
return f"<{self.__class__.__name__} name='{self.info.name}' data={self.data}>"
|
8e2aca32e10d56a2bcb21149f7c6d5dfce7c58fde023397b163e3e96cb054a6f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import math
import numpy as np
from astropy.modeling import models
from astropy.modeling.core import Fittable1DModel, Fittable2DModel
from .core import Kernel, Kernel1D, Kernel2D
from .utils import has_even_axis, raise_even_kernel_exception
__all__ = [
"Gaussian1DKernel",
"Gaussian2DKernel",
"CustomKernel",
"Box1DKernel",
"Box2DKernel",
"Tophat2DKernel",
"Trapezoid1DKernel",
"RickerWavelet1DKernel",
"RickerWavelet2DKernel",
"AiryDisk2DKernel",
"Moffat2DKernel",
"Model1DKernel",
"Model2DKernel",
"TrapezoidDisk2DKernel",
"Ring2DKernel",
]
def _round_up_to_odd_integer(value):
i = math.ceil(value)
if i % 2 == 0:
return i + 1
else:
return i
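# Illustrative sketch (not part of astropy): the default kernel sizes below use
# this helper, which always rounds up to an odd integer. The inputs are
# hypothetical examples.
def _demo_round_up_to_odd_integer():
    return _round_up_to_odd_integer(8.0), _round_up_to_odd_integer(7.2)  # (9, 9)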
class Gaussian1DKernel(Kernel1D):
"""
1D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
stddev : number
Standard deviation of the Gaussian kernel.
x_size : int, optional
Size of the kernel array. Default = ⌊8*stddev+1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin. Very slow.
factor : number, optional
Factor of oversampling. Default factor = 10. If the factor
is too large, evaluation can be very slow.
See Also
--------
Box1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian1DKernel
gauss_1D_kernel = Gaussian1DKernel(10)
plt.plot(gauss_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, stddev, **kwargs):
self._model = models.Gaussian1D(1.0 / (np.sqrt(2 * np.pi) * stddev), 0, stddev)
self._default_size = _round_up_to_odd_integer(8 * stddev)
super().__init__(**kwargs)
self.normalize()
class Gaussian2DKernel(Kernel2D):
"""
2D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
x_stddev : float
Standard deviation of the Gaussian in x before rotating by theta.
y_stddev : float
Standard deviation of the Gaussian in y before rotating by theta.
theta : float or `~astropy.units.Quantity` ['angle']
Rotation angle. If passed as a float, it is assumed to be in radians.
The rotation angle increases counterclockwise.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*stddev + 1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*stddev + 1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian2DKernel
gaussian_2D_kernel = Gaussian2DKernel(10)
plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs):
if y_stddev is None:
y_stddev = x_stddev
self._model = models.Gaussian2D(
amplitude=1.0 / (2 * np.pi * x_stddev * y_stddev),
x_mean=0,
y_mean=0,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
)
self._default_size = _round_up_to_odd_integer(8 * np.max([x_stddev, y_stddev]))
super().__init__(**kwargs)
self.normalize()
class Box1DKernel(Kernel1D):
"""
1D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
and can produce artifacts when applied repeatedly to the same data.
The generated kernel is normalized so that it integrates to 1.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
weighting the edge pixels with 1/2. E.g. a Box kernel with an effective
smoothing of 4 pixels would have the following array: [0.5, 1, 1, 1, 0.5].
Parameters
----------
width : number
Width of the filter kernel.
mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'linear_interp' (default)
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response function:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box1DKernel
box_1D_kernel = Box1DKernel(9)
plt.plot(box_1D_kernel, drawstyle='steps')
plt.xlim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box1D(1.0 / width, 0, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs["mode"] = "linear_interp"
super().__init__(**kwargs)
self.normalize()
class Box2DKernel(Kernel2D):
"""
2D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
and can produce artifacts when applied repeatedly to the same data.
The generated kernel is normalized so that it integrates to 1.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
weighting the edge pixels with 1/2.
Parameters
----------
width : number
Width of the filter kernel.
mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'linear_interp' (default)
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box2DKernel
box_2D_kernel = Box2DKernel(9)
plt.imshow(box_2D_kernel, interpolation='none', origin='lower',
vmin=0.0, vmax=0.015)
plt.xlim(-1, 9)
plt.ylim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box2D(1.0 / width**2, 0, 0, width, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs["mode"] = "linear_interp"
super().__init__(**kwargs)
self.normalize()
class Tophat2DKernel(Kernel2D):
"""
2D Tophat filter kernel.
The Tophat filter is an isotropic smoothing filter. It can produce
artifacts when applied repeatedly on the same data.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : int
Radius of the filter kernel.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Tophat2DKernel
tophat_2D_kernel = Tophat2DKernel(40)
plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius, **kwargs):
self._model = models.Disk2D(1.0 / (np.pi * radius**2), 0, 0, radius)
self._default_size = _round_up_to_odd_integer(2 * radius)
super().__init__(**kwargs)
self.normalize()
class Ring2DKernel(Kernel2D):
"""
2D Ring filter kernel.
The Ring filter kernel is the difference between two Tophat kernels of
different width. This kernel is useful for, e.g., background estimation.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius_in : number
Inner radius of the ring kernel.
width : number
Width of the ring kernel.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Ring2DKernel
ring_2D_kernel = Ring2DKernel(9, 8)
plt.imshow(ring_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius_in, width, **kwargs):
radius_out = radius_in + width
self._model = models.Ring2D(
1.0 / (np.pi * (radius_out**2 - radius_in**2)), 0, 0, radius_in, width
)
self._default_size = _round_up_to_odd_integer(2 * radius_out)
super().__init__(**kwargs)
self.normalize()
class Trapezoid1DKernel(Kernel1D):
"""
1D trapezoid kernel.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
width : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Trapezoid1DKernel
trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2)
plt.plot(trapezoid_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('amplitude')
plt.xlim(-1, 28)
plt.show()
"""
_is_bool = False
def __init__(self, width, slope=1.0, **kwargs):
self._model = models.Trapezoid1D(1, 0, width, slope)
self._default_size = _round_up_to_odd_integer(width + 2.0 / slope)
super().__init__(**kwargs)
self.normalize()
class TrapezoidDisk2DKernel(Kernel2D):
"""
2D trapezoid kernel.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : number
Radius of the filter kernel, defined as the radius of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import TrapezoidDisk2DKernel
trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2)
plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, slope=1.0, **kwargs):
self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope)
self._default_size = _round_up_to_odd_integer(2 * radius + 2.0 / slope)
super().__init__(**kwargs)
self.normalize()
class RickerWavelet1DKernel(Kernel1D):
"""
1D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat"
kernel).
The Ricker wavelet, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smooths the data and removes slowly varying
or constant structures (e.g., background). It is useful for peak or
multi-scale detection.
This kernel is derived from a normalized Gaussian function, by
computing the second derivative. This results in an amplitude
at the kernel's center of 1. / (sqrt(2 * pi) * width ** 3). The
normalization is the same as for `scipy.ndimage.gaussian_laplace`,
except for a minus sign.
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet1DKernel
ricker_1d_kernel = RickerWavelet1DKernel(10)
plt.plot(ricker_1d_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_is_bool = True
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.sqrt(2 * np.pi) * width**3)
self._model = models.RickerWavelet1D(amplitude, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
class RickerWavelet2DKernel(Kernel2D):
"""
2D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat"
kernel).
The Ricker wavelet, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smooths the data and removes slowly varying
or constant structures (e.g., background). It is useful for peak or
multi-scale detection.
This kernel is derived from a normalized Gaussian function, by
computing the second derivative. This results in an amplitude
at the kernel's center of 1. / (pi * width ** 4). The normalization
is the same as for `scipy.ndimage.gaussian_laplace`, except
for a minus sign.
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width +1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet2DKernel
ricker_2d_kernel = RickerWavelet2DKernel(10)
plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.pi * width**4)
self._model = models.RickerWavelet2D(amplitude, 0, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
class AiryDisk2DKernel(Kernel2D):
"""
2D Airy disk kernel.
This kernel models the diffraction pattern of a circular aperture.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : float
The radius of the Airy disk kernel (radius of the first zero).
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*radius + 1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*radius + 1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import AiryDisk2DKernel
airydisk_2D_kernel = AiryDisk2DKernel(10)
plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, **kwargs):
self._model = models.AiryDisk2D(1, 0, 0, radius)
self._default_size = _round_up_to_odd_integer(8 * radius)
super().__init__(**kwargs)
self.normalize()
class Moffat2DKernel(Kernel2D):
"""
2D Moffat kernel.
This kernel is a typical model for a seeing limited PSF.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊4*FWHM + 1⌋, where
FWHM is the full width at half maximum of the Moffat model.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊4*FWHM + 1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Moffat2DKernel
moffat_2D_kernel = Moffat2DKernel(3, 2)
plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, gamma, alpha, **kwargs):
# Compute amplitude, from
# https://en.wikipedia.org/wiki/Moffat_distribution
amplitude = (alpha - 1.0) / (np.pi * gamma * gamma)
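# A sketch of the normalization (assuming the standard Moffat profile used by
# models.Moffat2D): integrating amplitude * (1 + r**2 / gamma**2) ** (-alpha)
# over the plane gives amplitude * pi * gamma**2 / (alpha - 1) (for alpha > 1),
# so this amplitude makes the model integrate to one before the final
# normalize() call.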
self._model = models.Moffat2D(amplitude, 0, 0, gamma, alpha)
self._default_size = _round_up_to_odd_integer(4.0 * self._model.fwhm)
super().__init__(**kwargs)
self.normalize()
class Model1DKernel(Kernel1D):
"""
Create kernel from 1D model.
The model has to be centered on x = 0.
Parameters
----------
model : `~astropy.modeling.Fittable1DModel`
Kernel response function model
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
Must be odd.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable1DModel`
See also
--------
Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian1D model:
>>> from astropy.modeling.models import Gaussian1D
>>> from astropy.convolution.kernels import Model1DKernel
>>> gauss = Gaussian1D(1, 0, 2)
And create a custom one dimensional kernel from it:
>>> gauss_kernel = Model1DKernel(gauss, x_size=9)
This kernel can now be used like a usual Astropy kernel.
"""
_separable = False
_is_bool = False
def __init__(self, model, **kwargs):
if isinstance(model, Fittable1DModel):
self._model = model
else:
raise TypeError("Must be Fittable1DModel")
super().__init__(**kwargs)
class Model2DKernel(Kernel2D):
"""
Create kernel from 2D model.
The model has to be centered on x = 0 and y = 0.
Parameters
----------
model : `~astropy.modeling.Fittable2DModel`
Kernel response function model
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
Must be odd.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width +1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable2DModel`
See also
--------
Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian2D model:
>>> from astropy.modeling.models import Gaussian2D
>>> from astropy.convolution.kernels import Model2DKernel
>>> gauss = Gaussian2D(1, 0, 0, 2, 2)
And create a custom two dimensional kernel from it:
>>> gauss_kernel = Model2DKernel(gauss, x_size=9)
This kernel can now be used like a usual astropy kernel.
"""
_is_bool = False
_separable = False
def __init__(self, model, **kwargs):
self._separable = False
if isinstance(model, Fittable2DModel):
self._model = model
else:
raise TypeError("Must be Fittable2DModel")
super().__init__(**kwargs)
class CustomKernel(Kernel):
"""
Create filter kernel from list or array.
Parameters
----------
array : list or array
Filter kernel array. Size must be odd.
Raises
------
TypeError
If array is not a list or array.
`~astropy.convolution.KernelSizeError`
If array size is even.
See also
--------
Model2DKernel, Model1DKernel
Examples
--------
Define one dimensional array:
>>> from astropy.convolution.kernels import CustomKernel
>>> import numpy as np
>>> array = np.array([1, 2, 3, 2, 1])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
1
Define two dimensional array:
>>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
2
"""
def __init__(self, array):
self.array = array
super().__init__(self._array)
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
@array.setter
def array(self, array):
"""
Filter kernel array setter.
"""
if isinstance(array, np.ndarray):
self._array = array.astype(np.float64)
elif isinstance(array, list):
self._array = np.array(array, dtype=np.float64)
else:
raise TypeError("Must be list or array.")
# Check if array is odd in all axes
if has_even_axis(self):
raise_even_kernel_exception()
# Check if array is bool
ones = self._array == 1.0
zeros = self._array == 0
self._is_bool = bool(np.all(np.logical_or(ones, zeros)))
|
9d72e9c381b173a7c7b99ef391e8ad12280f082ea152efc8834ee4866083701b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the convolution and filter functionalities of astropy.
A few conceptual notes:
A filter kernel is mainly characterized by its response function. In the 1D
case we speak of "impulse response function", in the 2D case we call it "point
spread function". This response function is given for every kernel by an
astropy `FittableModel`, which is evaluated on a grid to obtain a filter array,
which can then be applied to binned data.
The model is centered on the array and should have an amplitude such that the array
integrates to one per default.
Currently only symmetric 2D kernels are supported.
"""
import copy
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from .utils import add_kernel_arrays_1D, add_kernel_arrays_2D, discretize_model
MAX_NORMALIZATION = 100
__all__ = ["Kernel", "Kernel1D", "Kernel2D", "kernel_arithmetics"]
class Kernel:
"""
Convolution kernel base class.
Parameters
----------
array : ndarray
Kernel array.
"""
_separable = False
_is_bool = True
_model = None
def __init__(self, array):
self._array = np.asanyarray(array)
@property
def truncation(self):
"""
Absolute deviation of the sum of the kernel array values from
one.
"""
return np.abs(1.0 - self._array.sum())
@property
def is_bool(self):
"""
Indicates if kernel is bool.
If the kernel is bool, the multiplication in the convolution can be
omitted to increase performance.
"""
return self._is_bool
@property
def model(self):
"""
Kernel response model.
"""
return self._model
@property
def dimension(self):
"""
Kernel dimension.
"""
return self.array.ndim
@property
def center(self):
"""
Index of the kernel center.
"""
return [axes_size // 2 for axes_size in self._array.shape]
def normalize(self, mode="integral"):
"""
Normalize the filter kernel.
Parameters
----------
mode : {'integral', 'peak'}
One of the following modes:
* 'integral' (default)
Kernel is normalized such that its integral = 1.
* 'peak'
Kernel is normalized such that its peak = 1.
"""
if mode == "integral":
normalization = self._array.sum()
elif mode == "peak":
normalization = self._array.max()
else:
raise ValueError("invalid mode, must be 'integral' or 'peak'")
# Warn the user for kernels that sum to zero
if normalization == 0:
warnings.warn(
"The kernel cannot be normalized because it sums to zero.",
AstropyUserWarning,
)
else:
np.divide(self._array, normalization, self._array)
self._kernel_sum = self._array.sum()
@property
def shape(self):
"""
Shape of the kernel array.
"""
return self._array.shape
@property
def separable(self):
"""
Indicates if the filter kernel is separable.
A 2D filter is separable when its filter array can be written as the
outer product of two 1D arrays.
If a filter kernel is separable, higher dimension convolutions will be
performed by applying the 1D filter array consecutively on every dimension.
This is significantly faster than using a filter array with the same
dimension.
"""
return self._separable
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
def __add__(self, kernel):
"""
Add two filter kernels.
"""
return kernel_arithmetics(self, kernel, "add")
def __sub__(self, kernel):
"""
Subtract two filter kernels.
"""
return kernel_arithmetics(self, kernel, "sub")
def __mul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __rmul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __array__(self):
"""
Array representation of the kernel.
"""
return self._array
def __array_wrap__(self, array, context=None):
"""
Wrapper for multiplication with numpy arrays.
"""
if type(context[0]) == np.ufunc:
return NotImplemented
else:
return array
class Kernel1D(Kernel):
"""
Base class for 1D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : int or None, optional
Size of the kernel array. Default = ⌊8*width+1⌋.
Only used if ``array`` is None.
array : ndarray or None, optional
Kernel array.
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, array=None, **kwargs):
# Initialize from model
if self._model:
if array is not None:
# Reject "array" keyword for kernel models, to avoid them not being
# populated as expected.
raise TypeError("Array argument not allowed for kernel models.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
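# For example (values at which the model is then evaluated in 'center' mode):
# x_size = 5 -> x_range = (-2, 3), i.e. pixel centers -2, -1, 0, 1, 2;
# x_size = 4 -> x_range = (-1.5, 2.5), i.e. pixel centers -1.5, -0.5, 0.5, 1.5.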
array = discretize_model(self._model, x_range, **kwargs)
# Initialize from array
elif array is None:
raise TypeError("Must specify either array or model.")
super().__init__(array)
class Kernel2D(Kernel):
"""
Base class for 2D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width + 1⌋.
Only used if ``array`` is None.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width + 1⌋.
Only used if ``array`` is None.
array : ndarray or None, optional
Kernel array. Default is None.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
width : number
Width of the filter kernel.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, y_size=None, array=None, **kwargs):
# Initialize from model
if self._model:
if array is not None:
# Reject "array" keyword for kernel models, to avoid them not being
# populated as expected.
raise TypeError("Array argument not allowed for kernel models.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
if y_size is None:
y_size = x_size
elif y_size != int(y_size):
raise TypeError("y_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
if y_size % 2 == 0: # even kernel
y_range = (-(int(y_size)) // 2 + 0.5, (int(y_size)) // 2 + 0.5)
else: # odd kernel
y_range = (-(int(y_size) - 1) // 2, (int(y_size) - 1) // 2 + 1)
array = discretize_model(self._model, x_range, y_range, **kwargs)
# Initialize from array
elif array is None:
raise TypeError("Must specify either array or model.")
super().__init__(array)
def kernel_arithmetics(kernel, value, operation):
"""
Add, subtract or multiply two kernels.
Parameters
----------
kernel : `astropy.convolution.Kernel`
Kernel instance.
value : `astropy.convolution.Kernel`, float, or int
Value to operate with.
operation : {'add', 'sub', 'mul'}
One of the following operations:
* 'add'
Add two kernels
* 'sub'
Subtract two kernels
* 'mul'
Multiply kernel with number or convolve two kernels.
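Examples
--------
A minimal sketch; ``kernel1 - kernel2`` dispatches here with
``operation='sub'`` (the Gaussian widths below are purely illustrative):
>>> from astropy.convolution import Gaussian1DKernel
>>> from astropy.convolution.core import kernel_arithmetics
>>> big = Gaussian1DKernel(2)
>>> small = Gaussian1DKernel(1)
>>> diff = kernel_arithmetics(big, small, 'sub')  # equivalent to big - small
>>> diff.shape
(17,)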
"""
# 1D kernels
if isinstance(kernel, Kernel1D) and isinstance(value, Kernel1D):
if operation == "add":
new_array = add_kernel_arrays_1D(kernel.array, value.array)
if operation == "sub":
new_array = add_kernel_arrays_1D(kernel.array, -value.array)
if operation == "mul":
raise Exception(
"Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead."
)
new_kernel = Kernel1D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# 2D kernels
elif isinstance(kernel, Kernel2D) and isinstance(value, Kernel2D):
if operation == "add":
new_array = add_kernel_arrays_2D(kernel.array, value.array)
if operation == "sub":
new_array = add_kernel_arrays_2D(kernel.array, -value.array)
if operation == "mul":
raise Exception(
"Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead."
)
new_kernel = Kernel2D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# kernel and number
elif isinstance(kernel, (Kernel1D, Kernel2D)) and np.isscalar(value):
if operation == "mul":
new_kernel = copy.copy(kernel)
new_kernel._array *= value
else:
raise Exception("Kernel operation not supported.")
else:
raise Exception("Kernel operation not supported.")
return new_kernel
|
ffc6d7e29bae9f147bb1cc52083c392c99f6f25d1d8d75aa5d294660e0b0ea83 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from functools import partial
import numpy as np
from astropy import units as u
from astropy.modeling.convolution import Convolution
from astropy.modeling.core import SPECIAL_OPERATORS, CompoundModel
from astropy.nddata import support_nddata
from astropy.utils.console import human_file_size
from astropy.utils.exceptions import AstropyUserWarning
from ._convolve import _convolveNd_c
from .core import MAX_NORMALIZATION, Kernel, Kernel1D, Kernel2D
from .utils import KernelSizeError, has_even_axis, raise_even_kernel_exception
# np.unique([scipy.fft.next_fast_len(i, real=True) for i in range(10000)])
# fmt: off
_good_sizes = np.array(
[
0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40,
45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90,
96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162,
180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288,
300, 320, 324, 360, 375, 384, 400, 405, 432, 450, 480,
486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720,
729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024,
1080, 1125, 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458,
1500, 1536, 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025,
2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700,
2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645,
3750, 3840, 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800,
4860, 5000, 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144,
6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776,
8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000,
]
)
# fmt: on
_good_range = int(np.log10(_good_sizes[-1]))
# Disabling doctests when scipy isn't present.
__doctest_requires__ = {("convolve_fft",): ["scipy.fft"]}
BOUNDARY_OPTIONS = [None, "fill", "wrap", "extend"]
def _next_fast_lengths(shape):
"""
Find optimal or good sizes to pad an array of ``shape`` to for better
performance with `numpy.fft.*fft` and `scipy.fft.*fft`.
Calculated directly with `scipy.fft.next_fast_len`, if available; otherwise
looked up from list and scaled by powers of 10, if necessary.
"""
try:
import scipy.fft
return np.array([scipy.fft.next_fast_len(j) for j in shape])
except ImportError:
pass
newshape = np.empty(len(np.atleast_1d(shape)), dtype=int)
for i, j in enumerate(shape):
scale = 10 ** max(int(np.ceil(np.log10(j))) - _good_range, 0)
for n in _good_sizes:
if n * scale >= j:
newshape[i] = n * scale
break
else:
raise ValueError(
f"No next fast length for {j} found in list of _good_sizes "
f"<= {_good_sizes[-1] * scale}."
)
return newshape
def _copy_input_if_needed(
input, dtype=float, order="C", nan_treatment=None, mask=None, fill_value=None
):
# Alias input
input = input.array if isinstance(input, Kernel) else input
# strip quantity attributes
if hasattr(input, "unit"):
input = input.value
output = input
# Copy input
try:
# Anything that's masked must be turned into NaNs for the interpolation.
# This requires copying. A copy is also needed for nan_treatment == 'fill'
# A copy prevents possible function side-effects of the input array.
if nan_treatment == "fill" or np.ma.is_masked(input) or mask is not None:
if np.ma.is_masked(input):
# ``np.ma.maskedarray.filled()`` returns a copy, however there
# is no way to specify the return type or order etc. In addition
# ``np.nan`` is a ``float`` and there is no conversion to an
# ``int`` type. Therefore, a pre-fill copy is needed for non
# ``float`` masked arrays. ``subok=True`` is needed to retain
# ``np.ma.maskedarray.filled()``. ``copy=False`` allows the fill
# to act as the copy if type and order are already correct.
output = np.array(
input, dtype=dtype, copy=False, order=order, subok=True
)
output = output.filled(fill_value)
else:
# Since we're making a copy, we might as well use `subok=False` to save,
# what is probably, a negligible amount of memory.
output = np.array(
input, dtype=dtype, copy=True, order=order, subok=False
)
if mask is not None:
# mask != 0 yields a bool mask for all ints/floats/bool
output[mask != 0] = fill_value
else:
# The call below is synonymous with np.asanyarray(array, dtype=float, order='C')
# The advantage of `subok=True` is that it won't copy when array is an ndarray subclass.
# If it is and `subok=False` (default), then it will copy even if `copy=False`. This
# uses less memory when ndarray subclasses are passed in.
output = np.array(input, dtype=dtype, copy=False, order=order, subok=True)
except (TypeError, ValueError) as e:
raise TypeError(
"input should be a Numpy array or something convertible into a float array",
e,
)
return output
@support_nddata(data="array")
def convolve(
array,
kernel,
boundary="fill",
fill_value=0.0,
nan_treatment="interpolate",
normalize_kernel=True,
mask=None,
preserve_nan=False,
normalization_zero_tol=1e-8,
):
"""
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
array : `~astropy.nddata.NDData` or array-like
The array to convolve. This should be a 1, 2, or 3-dimensional array
or a list or a set of nested lists representing a 1, 2, or
3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those for
the array, and the dimensions should be odd in all directions. If a
masked array, the masked values will be replaced by ``fill_value``.
boundary : str, optional
A flag indicating how to handle boundaries:
* `None`
Set the ``result`` values to zero where the kernel
extends beyond the edge of the array.
* 'fill'
Set values outside the array boundary to ``fill_value`` (default).
* 'wrap'
Periodic boundary that wrap to the other side of ``array``.
* 'extend'
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``.
normalize_kernel : bool, optional
Whether to normalize the kernel to have a sum of one.
nan_treatment : {'interpolate', 'fill'}, optional
The method used to handle NaNs in the input ``array``:
* ``'interpolate'``: ``NaN`` values are replaced with
interpolated values using the kernel as an interpolation
function. Note that if the kernel has a sum equal to
zero, NaN interpolation is not possible and will raise an
exception.
* ``'fill'``: ``NaN`` values are replaced by ``fill_value``
prior to convolution.
preserve_nan : bool, optional
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : None or ndarray, optional
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
masked if it is masked in either ``mask`` *or* ``array.mask``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is "1e-8".
Returns
-------
result : `numpy.ndarray`
An array with the same dimensions as the input array,
convolved with kernel. The data type depends on the input
array type. If array is a floating point type, then the
return array keeps the same data type, otherwise the type
is ``numpy.float``.
Notes
-----
For masked arrays, masked values are treated as NaNs. The convolution
is always done at ``numpy.float`` precision.
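Examples
--------
A minimal sketch of the default behaviour; the NaN case mirrors the
corresponding `convolve_fft` examples:
>>> import numpy as np
>>> from astropy.convolution import convolve
>>> convolve([1, 0, 3], [1, 1, 1])  # doctest: +FLOAT_CMP
array([0.33333333, 1.33333333, 1. ])
>>> convolve([1, np.nan, 3], [1, 1, 1])  # doctest: +FLOAT_CMP
array([0.5, 2. , 1.5])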
"""
if boundary not in BOUNDARY_OPTIONS:
raise ValueError(f"Invalid boundary option: must be one of {BOUNDARY_OPTIONS}")
if nan_treatment not in ("interpolate", "fill"):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# OpenMP support is disabled at the C src code level, changing this will have
# no effect.
n_threads = 1
# Keep refs to originals
passed_kernel = kernel
passed_array = array
# The C routines all need float type inputs (so, a particular
# bit size, endianness, etc.). So we have to convert, which also
# has the effect of making copies so we don't modify the inputs.
# After this, the variables we work with will be array_internal, and
# kernel_internal. However -- we do want to keep track of what type
# the input array was so we can cast the result to that at the end
# if it's a floating point type. Don't bother with this for lists --
# just always push those as float.
# It is always necessary to make a copy of kernel (since it is modified),
# but, if we just so happen to be lucky enough to have the input array
# have exactly the desired type, we just alias to array_internal
# Convert kernel to ndarray if not already
# Copy or alias array to array_internal
array_internal = _copy_input_if_needed(
passed_array,
dtype=float,
order="C",
nan_treatment=nan_treatment,
mask=mask,
fill_value=np.nan,
)
array_dtype = getattr(passed_array, "dtype", array_internal.dtype)
# Copy or alias kernel to kernel_internal
kernel_internal = _copy_input_if_needed(
passed_kernel,
dtype=float,
order="C",
nan_treatment=None,
mask=None,
fill_value=fill_value,
)
# Make sure kernel has all odd axes
if has_even_axis(kernel_internal):
raise_even_kernel_exception()
# If both image array and kernel are Kernel instances
# constrain convolution method
# This must occur before the main alias/copy of ``passed_kernel`` to
# ``kernel_internal`` as it is used for filling masked kernels.
if isinstance(passed_array, Kernel) and isinstance(passed_kernel, Kernel):
warnings.warn(
"Both array and kernel are Kernel instances, hardwiring "
"the following parameters: boundary='fill', fill_value=0,"
" normalize_Kernel=True, nan_treatment='interpolate'",
AstropyUserWarning,
)
boundary = "fill"
fill_value = 0
normalize_kernel = True
nan_treatment = "interpolate"
# -----------------------------------------------------------------------
# From this point onwards refer only to ``array_internal`` and
# ``kernel_internal``.
# Assume both are base np.ndarrays and NOT subclasses e.g. NOT
# ``Kernel`` nor ``np.ma.maskedarray`` classes.
# -----------------------------------------------------------------------
# Check dimensionality
if array_internal.ndim == 0:
raise Exception("cannot convolve 0-dimensional arrays")
elif array_internal.ndim > 3:
raise NotImplementedError(
"convolve only supports 1, 2, and 3-dimensional arrays at this time"
)
elif array_internal.ndim != kernel_internal.ndim:
raise Exception("array and kernel have differing number of dimensions.")
array_shape = np.array(array_internal.shape)
kernel_shape = np.array(kernel_internal.shape)
pad_width = kernel_shape // 2
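# e.g. a kernel of shape (5, 5) gives pad_width = [2, 2]; for boundary='fill'
# the array is later embedded in an array enlarged by 2 * pad_width and filled
# with fill_value before being passed to the C routine.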
# For boundary=None only the center space is convolved. All array indices within a
# distance kernel.shape//2 from the edge are completely ignored (zeroed).
# E.g. (1D list) only the indices len(kernel)//2 : len(array)-len(kernel)//2
# are convolved. It is therefore not possible to use this method to convolve an
# array by a kernel that is larger (see note below) than the array - as ALL pixels
# would be ignored leaving an array of only zeros.
# Note: For even kernels the correctness condition is array_shape > kernel_shape.
# For odd kernels it is:
# array_shape >= kernel_shape OR
# array_shape > kernel_shape-1 OR
# array_shape > 2*(kernel_shape//2).
# Since the latter is equal to the former two for odd lengths, the latter condition is
# complete.
if boundary is None and not np.all(array_shape > 2 * pad_width):
raise KernelSizeError(
"for boundary=None all kernel axes must be smaller than array's - "
"use boundary in ['fill', 'extend', 'wrap'] instead."
)
# NaN interpolation significantly slows down the C convolution
# computation. Since nan_treatment='interpolate' is the default, check
# whether it is even needed; if not, don't interpolate.
# NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any()
nan_interpolate = (nan_treatment == "interpolate") and np.isnan(
array_internal.sum()
)
# Check if kernel is normalizable
if normalize_kernel or nan_interpolate:
kernel_sum = kernel_internal.sum()
kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)
if kernel_sum < 1.0 / MAX_NORMALIZATION or kernel_sums_to_zero:
if nan_interpolate:
raise ValueError(
"Setting nan_treatment='interpolate' "
"requires the kernel to be normalized, "
"but the input kernel has a sum close "
"to zero. For a zero-sum kernel and "
"data with NaNs, set nan_treatment='fill'."
)
else:
raise ValueError(
"The kernel can't be normalized, because "
"its sum is close to zero. The sum of the "
f"given kernel is < {1.0 / MAX_NORMALIZATION}"
)
# Mark the NaN values so we can replace them later if interpolate_nan is
# not set
if preserve_nan or nan_treatment == "fill":
initially_nan = np.isnan(array_internal)
if nan_treatment == "fill":
array_internal[initially_nan] = fill_value
# Avoid any memory allocation within the C code. Allocate output array
# here and pass through instead.
result = np.zeros(array_internal.shape, dtype=float, order="C")
embed_result_within_padded_region = True
array_to_convolve = array_internal
if boundary in ("fill", "extend", "wrap"):
embed_result_within_padded_region = False
if boundary == "fill":
# This method is faster than using numpy.pad(..., mode='constant')
array_to_convolve = np.full(
array_shape + 2 * pad_width,
fill_value=fill_value,
dtype=float,
order="C",
)
# Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of
# [pad_width[0]:-pad_width[0]]
# to account for when the kernel has size of 1 making pad_width = 0.
if array_internal.ndim == 1:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0]
] = array_internal
elif array_internal.ndim == 2:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0],
pad_width[1] : array_shape[1] + pad_width[1],
] = array_internal
else:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0],
pad_width[1] : array_shape[1] + pad_width[1],
pad_width[2] : array_shape[2] + pad_width[2],
] = array_internal
else:
np_pad_mode_dict = {"fill": "constant", "extend": "edge", "wrap": "wrap"}
np_pad_mode = np_pad_mode_dict[boundary]
pad_width = kernel_shape // 2
if array_internal.ndim == 1:
np_pad_width = (pad_width[0],)
elif array_internal.ndim == 2:
np_pad_width = ((pad_width[0],), (pad_width[1],))
else:
np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],))
array_to_convolve = np.pad(
array_internal, pad_width=np_pad_width, mode=np_pad_mode
)
_convolveNd_c(
result,
array_to_convolve,
kernel_internal,
nan_interpolate,
embed_result_within_padded_region,
n_threads,
)
# So far, normalization has only occurred for nan_treatment == 'interpolate'
# because this had to happen within the C extension so as to ignore
# any NaNs
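# In that case the C code effectively used a unit-sum kernel, so with
# normalize_kernel=True nothing more is needed, while with
# normalize_kernel=False the result is scaled back up by the kernel sum.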
if normalize_kernel:
if not nan_interpolate:
result /= kernel_sum
elif nan_interpolate:
result *= kernel_sum
if nan_interpolate and not preserve_nan and np.isnan(result.sum()):
warnings.warn(
"nan_treatment='interpolate', however, NaN values detected "
"post convolution. A contiguous region of NaN values, larger "
"than the kernel size, are present in the input array. "
"Increase the kernel size to avoid this.",
AstropyUserWarning,
)
if preserve_nan:
result[initially_nan] = np.nan
# Convert result to original data type
array_unit = getattr(passed_array, "unit", None)
if array_unit is not None:
result <<= array_unit
if isinstance(passed_array, Kernel):
if isinstance(passed_array, Kernel1D):
new_result = Kernel1D(array=result)
elif isinstance(passed_array, Kernel2D):
new_result = Kernel2D(array=result)
else:
raise TypeError("Only 1D and 2D Kernels are supported.")
new_result._is_bool = False
new_result._separable = passed_array._separable
if isinstance(passed_kernel, Kernel):
new_result._separable = new_result._separable and passed_kernel._separable
return new_result
elif array_dtype.kind == "f":
# Try to preserve the input type if it's a floating point type
# Avoid making another copy if possible
try:
return result.astype(array_dtype, copy=False)
except TypeError:
return result.astype(array_dtype)
else:
return result
@support_nddata(data="array")
def convolve_fft(
array,
kernel,
boundary="fill",
fill_value=0.0,
nan_treatment="interpolate",
normalize_kernel=True,
normalization_zero_tol=1e-8,
preserve_nan=False,
mask=None,
crop=True,
return_fft=False,
fft_pad=None,
psf_pad=None,
min_wt=0.0,
allow_huge=False,
fftn=np.fft.fftn,
ifftn=np.fft.ifftn,
complex_dtype=complex,
dealias=False,
):
"""
Convolve an ndarray with an nd-kernel. Returns a convolved image with
``shape = array.shape``. Assumes kernel is centered.
`convolve_fft` is very similar to `convolve` in that it replaces ``NaN``
values in the original image with interpolated values using the kernel as
an interpolation function. However, it also includes many additional
options specific to the implementation.
`convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:
* It can treat ``NaN`` values as zeros or interpolate over them.
* ``inf`` values are treated as ``NaN``
* It optionally pads to the nearest faster sizes to improve FFT speed.
These sizes are optimized for the numpy and scipy implementations, and
``fftconvolve`` uses them by default as well; when using other external
functions (see below), results may vary.
* Its only valid ``mode`` is 'same' (i.e., the same shape array is returned)
* It lets you use your own fft, e.g.,
`pyFFTW <https://pypi.org/project/pyFFTW/>`_ or
`pyFFTW3 <https://pypi.org/project/PyFFTW3/0.2.1/>`_ , which can lead to
performance improvements, depending on your system configuration. pyFFTW3
is threaded, and therefore may yield significant performance benefits on
multi-core machines at the cost of greater memory requirements. Specify
the ``fftn`` and ``ifftn`` keywords to override the default, which is
`numpy.fft.fftn` and `numpy.fft.ifftn`. The `scipy.fft` functions also
offer somewhat better performance and a multi-threaded option.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric)
boundary : {'fill', 'wrap'}, optional
A flag indicating how to handle boundaries:
* 'fill': set values outside the array boundary to fill_value
(default)
* 'wrap': periodic boundary
The `None` and 'extend' parameters are not supported for FFT-based
convolution.
fill_value : float, optional
The value to use outside the array when using boundary='fill'.
nan_treatment : {'interpolate', 'fill'}, optional
The method used to handle NaNs in the input ``array``:
* ``'interpolate'``: ``NaN`` values are replaced with
interpolated values using the kernel as an interpolation
function. Note that if the kernel has a sum equal to
zero, NaN interpolation is not possible and will raise an
exception.
* ``'fill'``: ``NaN`` values are replaced by ``fill_value``
prior to convolution.
normalize_kernel : callable or boolean, optional
If specified, this is the function to divide kernel by to normalize it.
e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be:
``kernel = kernel / np.sum(kernel)``. If True, defaults to
``normalize_kernel = np.sum``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is "1e-8".
preserve_nan : bool, optional
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : None or ndarray, optional
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
masked if it is masked in either ``mask`` *or* ``array.mask``.
crop : bool, optional
Default on. Return an image of the size of the larger of the input
image and the kernel.
If the image and kernel are asymmetric in opposite directions, will
return the largest image in both directions.
For example, if an input image has shape [100,3] but a kernel with shape
[6,6] is used, the output will be [100,6].
return_fft : bool, optional
Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is
``ifft(fft(image)*fft(kernel))``). Useful for making PSDs.
fft_pad : bool, optional
Default on. Zero-pad image to the nearest size supporting more efficient
execution of the FFT, generally values factorizable into the first 3-5
prime numbers. With ``boundary='wrap'``, this will be disabled.
psf_pad : bool, optional
Zero-pad image to be at least the sum of the image sizes to avoid
edge-wrapping when smoothing. This is enabled by default with
``boundary='fill'``, but it can be overridden with a boolean option.
``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
min_wt : float, optional
If ignoring ``NaN`` / zeros, force all grid points with a weight less than
this value to ``NaN`` (the weight of a grid point with *no* ignored
neighbors is 1.0).
If ``min_wt`` is zero, then all zero-weight points will be set to zero
instead of ``NaN`` (which they would be otherwise, because 1/0 = nan).
See the examples below.
allow_huge : bool, optional
Allow huge arrays in the FFT? If False, will raise an exception if the
array or kernel size is >1 GB.
fftn : callable, optional
The fft function. Can be overridden to use your own ffts,
e.g. an fftw3 wrapper or scipy's fftn, ``fft=scipy.fftpack.fftn``.
ifftn : callable, optional
The inverse fft function. Can be overridden in the same way as ``fftn``.
complex_dtype : complex type, optional
Which complex dtype to use. `numpy` has a range of options, from 64 to
256.
dealias : bool, optional
Default off. Zero-pad image to enable explicit dealiasing
of convolution. With ``boundary='wrap'``, this will be disabled.
Note that for an input of nd dimensions this will increase
the size of the temporary arrays by at least ``1.5**nd``.
This may result in significantly more memory usage.
Returns
-------
default : ndarray
``array`` convolved with ``kernel``. If ``return_fft`` is set, returns
``fft(array) * fft(kernel)``. If crop is not set, returns the
image, but with the fft-padded size instead of the input size.
Raises
------
`ValueError`
If the array is bigger than 1 GB after padding, will raise this
exception unless ``allow_huge`` is True.
See Also
--------
convolve:
Convolve is a non-fft version of this code. It is more memory
efficient and for small kernels can be faster.
Notes
-----
With ``psf_pad=True`` and a large PSF, the resulting data
can become large and consume a lot of memory. See Issue
https://github.com/astropy/astropy/pull/4366 and the update in
https://github.com/astropy/astropy/pull/11533 for further details.
Dealiasing of pseudospectral convolutions is necessary for
numerical stability of the underlying algorithms. A common
method for handling this is to zero pad the image by at least
1/2 to eliminate the wavenumbers which have been aliased
by convolution. This is so that the aliased 1/3 of the
results of the convolution computation can be thrown out. See
https://doi.org/10.1175/1520-0469(1971)028%3C1074:OTEOAI%3E2.0.CO;2
https://iopscience.iop.org/article/10.1088/1742-6596/318/7/072037
Note that if dealiasing is necessary to your application, but your
process is memory constrained, you may want to consider using
FFTW++: https://github.com/dealias/fftwpp. It includes python
wrappers for a pseudospectral convolution which will implicitly
dealias your convolution without the need for additional padding.
Note that one cannot use FFTW++'s convolution directly in this
method, as it handles the entire convolution process internally.
Additionally, FFTW++ includes other useful pseudospectral methods to
consider.
Examples
--------
>>> convolve_fft([1, 0, 3], [1, 1, 1])
array([0.33333333, 1.33333333, 1. ])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1])
array([0.5, 2. , 1.5])
>>> convolve_fft([1, 0, 3], [0, 1, 0]) # doctest: +FLOAT_CMP
array([ 1.00000000e+00, -3.70074342e-17, 3.00000000e+00])
>>> convolve_fft([1, 2, 3], [1])
array([1., 2., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate')
array([1., 0., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate',
... min_wt=1e-8)
array([ 1., nan, 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate')
array([0.5, 2. , 1.5])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True)
array([0.5, 2. , 1.5])
>>> import scipy.fft # optional - requires scipy
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True,
... fftn=scipy.fft.fftn, ifftn=scipy.fft.ifftn)
array([0.5, 2. , 1.5])
>>> fft_mp = lambda a: scipy.fft.fftn(a, workers=-1) # use all available cores
>>> ifft_mp = lambda a: scipy.fft.ifftn(a, workers=-1)
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True, fftn=fft_mp, ifftn=ifft_mp)
array([0.5, 2. , 1.5])
"""
# Checking copied from convolve.py - however, since FFTs have real &
# complex components, we change the types. Only the real part will be
# returned! Note that this always makes a copy.
# Check kernel is kernel instance
if isinstance(kernel, Kernel):
kernel = kernel.array
if isinstance(array, Kernel):
raise TypeError(
"Can't convolve two kernels with convolve_fft. Use convolve instead."
)
if nan_treatment not in ("interpolate", "fill"):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# Get array quantity if it exists
array_unit = getattr(array, "unit", None)
# Convert array dtype to complex
# and ensure that list inputs become arrays
array = _copy_input_if_needed(
array,
dtype=complex,
order="C",
nan_treatment=nan_treatment,
mask=mask,
fill_value=np.nan,
)
kernel = _copy_input_if_needed(
kernel, dtype=complex, order="C", nan_treatment=None, mask=None, fill_value=0
)
# Check that the number of dimensions is compatible
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of dimensions")
arrayshape = array.shape
kernshape = kernel.shape
array_size_B = (
np.product(arrayshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize
) * u.byte
if array_size_B > 1 * u.GB and not allow_huge:
raise ValueError(
f"Size Error: Arrays will be {human_file_size(array_size_B)}. "
"Use allow_huge=True to override this exception."
)
# NaN and inf catching
nanmaskarray = np.isnan(array) | np.isinf(array)
if nan_treatment == "fill":
array[nanmaskarray] = fill_value
else:
array[nanmaskarray] = 0
nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
kernel[nanmaskkernel] = 0
if normalize_kernel is True:
if kernel.sum() < 1.0 / MAX_NORMALIZATION:
raise Exception(
"The kernel can't be normalized, because its sum is close to zero. The"
f" sum of the given kernel is < {1.0 / MAX_NORMALIZATION}"
)
kernel_scale = kernel.sum()
normalized_kernel = kernel / kernel_scale
kernel_scale = 1 # if we want to normalize it, leave it normed!
elif normalize_kernel:
# try this. If a function is not passed, the code will just crash... I
# think type checking would be better but PEPs say otherwise...
kernel_scale = normalize_kernel(kernel)
normalized_kernel = kernel / kernel_scale
else:
kernel_scale = kernel.sum()
if np.abs(kernel_scale) < normalization_zero_tol:
if nan_treatment == "interpolate":
raise ValueError(
"Cannot interpolate NaNs with an unnormalizable kernel"
)
else:
# the kernel's sum is near-zero, so it can't be scaled
kernel_scale = 1
normalized_kernel = kernel
else:
# the kernel is normalizable; we'll temporarily normalize it
# now and undo the normalization later.
normalized_kernel = kernel / kernel_scale
if boundary is None:
warnings.warn(
"The convolve_fft version of boundary=None is "
"equivalent to the convolve boundary='fill'. There is "
"no FFT equivalent to convolve's "
"zero-if-kernel-leaves-boundary",
AstropyUserWarning,
)
if psf_pad is None:
psf_pad = True
if fft_pad is None:
fft_pad = True
elif boundary == "fill":
# create a boundary region at least as large as the kernel
if psf_pad is False:
warnings.warn(
f"psf_pad was set to {psf_pad}, which overrides the "
"boundary='fill' setting.",
AstropyUserWarning,
)
else:
psf_pad = True
if fft_pad is None:
# default is 'True' according to the docstring
fft_pad = True
elif boundary == "wrap":
if psf_pad:
raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
psf_pad = False
if fft_pad:
raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
fft_pad = False
if dealias:
raise ValueError("With boundary='wrap', dealias cannot be enabled.")
fill_value = 0 # force zero; it should not be used
elif boundary == "extend":
raise NotImplementedError(
"The 'extend' option is not implemented for fft-based convolution"
)
# Add shapes elementwise for psf_pad.
if psf_pad: # default=False
# add the sizes along each dimension (bigger)
newshape = np.array(arrayshape) + np.array(kernshape)
else:
# take the larger shape in each dimension (smaller)
newshape = np.maximum(arrayshape, kernshape)
if dealias:
# Extend shape by 1/2 for dealiasing
newshape += np.ceil(newshape / 2).astype(int)
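# e.g. a length-100 axis becomes 150; the extra ~1/2 padding lets the
# aliased third of the convolution result be discarded (see Notes above).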
# Find ideal size for fft (was power of 2, now any powers of prime factors 2, 3, 5).
if fft_pad: # default=True
# Get optimized sizes from scipy.
newshape = _next_fast_lengths(newshape)
# perform a second check after padding
array_size_C = (
np.product(newshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize
) * u.byte
if array_size_C > 1 * u.GB and not allow_huge:
raise ValueError(
f"Size Error: Arrays will be {human_file_size(array_size_C)}. "
"Use allow_huge=True to override this exception."
)
# For future reference, this can be used to predict "almost exactly"
# how much *additional* memory will be used.
# size * (array + kernel + kernelfft + arrayfft +
# (kernel*array)fft +
# optional(weight image + weight_fft + weight_ifft) +
# optional(returned_fft))
# total_memory_used_GB = (np.product(newshape)*np.dtype(complex_dtype).itemsize
# * (5 + 3*((interpolate_nan or ) and kernel_is_normalized))
# + (1 + (not return_fft)) *
# np.product(arrayshape)*np.dtype(complex_dtype).itemsize
# + np.product(arrayshape)*np.dtype(bool).itemsize
# + np.product(kernshape)*np.dtype(bool).itemsize)
# ) / 1024.**3
# separate each dimension by the padding size... this is to determine the
# appropriate slice size to get back to the input dimensions
arrayslices = []
kernslices = []
for newdimsize, arraydimsize, kerndimsize in zip(newshape, arrayshape, kernshape):
center = newdimsize - (newdimsize + 1) // 2
arrayslices += [
slice(center - arraydimsize // 2, center + (arraydimsize + 1) // 2)
]
kernslices += [
slice(center - kerndimsize // 2, center + (kerndimsize + 1) // 2)
]
arrayslices = tuple(arrayslices)
kernslices = tuple(kernslices)
if not np.all(newshape == arrayshape):
if np.isfinite(fill_value):
bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value
else:
bigarray = np.zeros(newshape, dtype=complex_dtype)
bigarray[arrayslices] = array
else:
bigarray = array
if not np.all(newshape == kernshape):
bigkernel = np.zeros(newshape, dtype=complex_dtype)
bigkernel[kernslices] = normalized_kernel
else:
bigkernel = normalized_kernel
arrayfft = fftn(bigarray)
# need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
kernfft = fftn(np.fft.ifftshift(bigkernel))
fftmult = arrayfft * kernfft
interpolate_nan = nan_treatment == "interpolate"
if interpolate_nan:
if not np.isfinite(fill_value):
bigimwt = np.zeros(newshape, dtype=complex_dtype)
else:
bigimwt = np.ones(newshape, dtype=complex_dtype)
bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan
wtfft = fftn(bigimwt)
# You can only get to this point if kernel_is_normalized
wtfftmult = wtfft * kernfft
wtsm = ifftn(wtfftmult)
# need to re-zero weights outside of the image (if it is padded, we
# still don't weight those regions)
bigimwt[arrayslices] = wtsm.real[arrayslices]
else:
bigimwt = 1
if np.isnan(fftmult).any():
# this check should be unnecessary; call it an insanity check
raise ValueError("Encountered NaNs in convolve. This is disallowed.")
fftmult *= kernel_scale
if array_unit is not None:
fftmult <<= array_unit
if return_fft:
return fftmult
if interpolate_nan:
with np.errstate(divide="ignore", invalid="ignore"):
            # divide-by-zero is expected here; if the weight is zero, we want
            # the output to be nan or inf
rifft = (ifftn(fftmult)) / bigimwt
if not np.isscalar(bigimwt):
if min_wt > 0.0:
rifft[bigimwt < min_wt] = np.nan
else:
# Set anything with no weight to zero (taking into account
# slight offsets due to floating-point errors).
rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0
else:
rifft = ifftn(fftmult)
if preserve_nan:
rifft[arrayslices][nanmaskarray] = np.nan
if crop:
result = rifft[arrayslices].real
return result
else:
return rifft.real
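# Illustrative usage sketch for ``convolve_fft`` exercising the boundary and
# padding options handled above (the array values are arbitrary; ``allow_huge``
# only matters for large inputs):
#
#     import numpy as np
#     from astropy.convolution import Gaussian2DKernel, convolve_fft
#     img = np.zeros((65, 65))
#     img[32, 32] = 1.0
#     smoothed = convolve_fft(img, Gaussian2DKernel(x_stddev=2),
#                             boundary="fill", fill_value=0.0, allow_huge=True)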
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs):
"""
Given a data set containing NaNs, replace the NaNs by interpolating from
neighboring data points with a given kernel.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric). The kernel
*must be normalizable* (i.e., its sum cannot be zero).
convolve : `convolve` or `convolve_fft`
One of the two convolution functions defined in this package.
Returns
-------
newarray : `numpy.ndarray`
A copy of the original array with NaN pixels replaced with their
interpolated counterparts
"""
if not np.any(np.isnan(array)):
return array.copy()
newarray = array.copy()
convolved = convolve(
array,
kernel,
nan_treatment="interpolate",
normalize_kernel=True,
preserve_nan=False,
**kwargs,
)
isnan = np.isnan(array)
newarray[isnan] = convolved[isnan]
return newarray
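# Illustrative usage sketch for ``interpolate_replace_nans`` (arbitrary data;
# pass ``convolve=convolve_fft`` when the array or kernel is large):
#
#     import numpy as np
#     from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
#     img = np.ones((32, 32))
#     img[5, 7] = np.nan
#     fixed = interpolate_replace_nans(img, Gaussian2DKernel(x_stddev=1))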
def convolve_models(model, kernel, mode="convolve_fft", **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
mode : str
Keyword representing which function to use for convolution.
* 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
* 'convolve' : use `~astropy.convolution.convolve`.
**kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
or `~astropy.convolution.convolve_fft` depending on ``mode``.
Returns
-------
default : `~astropy.modeling.core.CompoundModel`
Convolved model
"""
if mode == "convolve_fft":
operator = SPECIAL_OPERATORS.add(
"convolve_fft", partial(convolve_fft, **kwargs)
)
elif mode == "convolve":
operator = SPECIAL_OPERATORS.add("convolve", partial(convolve, **kwargs))
else:
raise ValueError(f"Mode {mode} is not supported.")
return CompoundModel(operator, model, kernel)
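# Illustrative usage sketch for ``convolve_models`` (arbitrary parameters; the
# compound model performs a discrete convolution of the two sampled models
# when evaluated on a grid):
#
#     import numpy as np
#     from astropy.convolution import convolve_models
#     from astropy.modeling.models import Gaussian1D
#     g1 = Gaussian1D(amplitude=1, mean=0, stddev=1)
#     g2 = Gaussian1D(amplitude=1, mean=0, stddev=2)
#     combined = convolve_models(g1, g2, mode="convolve_fft")
#     y = combined(np.linspace(-5, 5, 101))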
def convolve_models_fft(model, kernel, bounding_box, resolution, cache=True, **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
bounding_box : tuple
The bounding box which encompasses enough of the support of both
the ``model`` and ``kernel`` so that an accurate convolution can be
computed.
resolution : float
The resolution that one wishes to approximate the convolution
integral at.
cache : optional, bool
Default value True. Allow for the storage of the convolution
computation for later reuse.
**kwargs : dict
        Keyword arguments to be passed to `~astropy.convolution.convolve_fft`.
Returns
-------
default : `~astropy.modeling.core.CompoundModel`
Convolved model
"""
operator = SPECIAL_OPERATORS.add("convolve_fft", partial(convolve_fft, **kwargs))
return Convolution(operator, model, kernel, bounding_box, resolution, cache)
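# Illustrative usage sketch for ``convolve_models_fft`` (the bounding box and
# resolution are arbitrary choices; they should bracket the support of both
# models finely enough for the convolution integral):
#
#     import numpy as np
#     from astropy.convolution import convolve_models_fft
#     from astropy.modeling.models import Gaussian1D
#     conv = convolve_models_fft(Gaussian1D(stddev=1), Gaussian1D(stddev=2),
#                                bounding_box=(-10, 10), resolution=0.01)
#     y = conv(np.linspace(-3, 3, 61))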
|
ed721760021421c5cf30abf6e76f17df2a189282a47d5374ccbbebf1c02cd9c5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains classes and functions to standardize access to
configuration files for Astropy and affiliated packages.
.. note::
The configuration system makes use of the 'configobj' package, which stores
configuration in a text format like that used in the standard library
`ConfigParser`. More information and documentation for configobj can be
found at https://configobj.readthedocs.io .
"""
import contextlib
import importlib
import io
import os
import pkgutil
import warnings
from contextlib import contextmanager, nullcontext
from os import path
from textwrap import TextWrapper
from warnings import warn
from astropy.extern.configobj import configobj, validate
from astropy.utils import find_current_module, silence
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.introspection import resolve_name
from .paths import get_config_dir
__all__ = (
"InvalidConfigurationItemWarning",
"ConfigurationMissingWarning",
"get_config",
"reload_config",
"ConfigNamespace",
"ConfigItem",
"generate_config",
"create_config_file",
)
class InvalidConfigurationItemWarning(AstropyWarning):
"""A Warning that is issued when the configuration value specified in the
astropy configuration file does not match the type expected for that
configuration value.
"""
# This was raised with Astropy < 4.3 when the configuration file was not found.
# It is kept for compatibility and should be removed at some point.
@deprecated("5.0")
class ConfigurationMissingWarning(AstropyWarning):
"""A Warning that is issued when the configuration directory cannot be
accessed (usually due to a permissions problem). If this warning appears,
configuration items will be set to their defaults rather than read from the
configuration file, and no configuration will persist across sessions.
"""
# these are not in __all__ because it's not intended that a user ever see them
class ConfigurationDefaultMissingError(ValueError):
"""An exception that is raised when the configuration defaults (which
should be generated at build-time) are missing.
"""
# this is used in astropy/__init__.py
class ConfigurationDefaultMissingWarning(AstropyWarning):
"""A warning that is issued when the configuration defaults (which
should be generated at build-time) are missing.
"""
class ConfigurationChangedWarning(AstropyWarning):
"""
A warning that the configuration options have changed.
"""
class _ConfigNamespaceMeta(type):
def __init__(cls, name, bases, dict):
if cls.__bases__[0] is object:
return
for key, val in dict.items():
if isinstance(val, ConfigItem):
val.name = key
class ConfigNamespace(metaclass=_ConfigNamespaceMeta):
"""
A namespace of configuration items. Each subpackage with
configuration items should define a subclass of this class,
containing `ConfigItem` instances as members.
For example::
class Conf(_config.ConfigNamespace):
unicode_output = _config.ConfigItem(
False,
'Use Unicode characters when outputting values, ...')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when ...',
aliases=['astropy.utils.console.USE_COLOR'])
conf = Conf()
"""
def __iter__(self):
for key, val in self.__class__.__dict__.items():
if isinstance(val, ConfigItem):
yield key
keys = __iter__
"""Iterate over configuration item names."""
def values(self):
"""Iterate over configuration item values."""
for val in self.__class__.__dict__.values():
if isinstance(val, ConfigItem):
yield val
def items(self):
"""Iterate over configuration item ``(name, value)`` pairs."""
for key, val in self.__class__.__dict__.items():
if isinstance(val, ConfigItem):
yield key, val
def set_temp(self, attr, value):
"""
Temporarily set a configuration value.
Parameters
----------
attr : str
Configuration item name
value : object
The value to set temporarily.
Examples
--------
>>> import astropy
>>> with astropy.conf.set_temp('use_color', False):
... pass
... # console output will not contain color
>>> # console output contains color again...
"""
if hasattr(self, attr):
return self.__class__.__dict__[attr].set_temp(value)
raise AttributeError(f"No configuration parameter '{attr}'")
def reload(self, attr=None):
"""
Reload a configuration item from the configuration file.
Parameters
----------
attr : str, optional
The name of the configuration parameter to reload. If not
provided, reload all configuration parameters.
"""
if attr is not None:
if hasattr(self, attr):
return self.__class__.__dict__[attr].reload()
raise AttributeError(f"No configuration parameter '{attr}'")
for item in self.values():
item.reload()
def reset(self, attr=None):
"""
Reset a configuration item to its default.
Parameters
----------
attr : str, optional
The name of the configuration parameter to reload. If not
provided, reset all configuration parameters.
"""
if attr is not None:
if hasattr(self, attr):
prop = self.__class__.__dict__[attr]
prop.set(prop.defaultvalue)
return
raise AttributeError(f"No configuration parameter '{attr}'")
for item in self.values():
item.set(item.defaultvalue)
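# Illustrative usage sketch for a ``ConfigNamespace`` subclass, reusing the
# ``Conf``/``unicode_output`` names from the class docstring above:
#
#     conf = Conf()
#     list(conf)                       # iterate over configuration item names
#     with conf.set_temp('unicode_output', True):
#         ...                          # value is overridden inside this block
#     conf.reload('unicode_output')    # re-read one item from the config file
#     conf.reset()                     # restore all items to their defaults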
class ConfigItem:
"""
A setting and associated value stored in a configuration file.
These objects should be created as members of
`ConfigNamespace` subclasses, for example::
class _Conf(config.ConfigNamespace):
unicode_output = config.ConfigItem(
False,
'Use Unicode characters when outputting values, and writing widgets '
'to the console.')
conf = _Conf()
Parameters
----------
defaultvalue : object, optional
The default value for this item. If this is a list of strings, this
item will be interpreted as an 'options' value - this item must be one
of those values, and the first in the list will be taken as the default
value.
description : str or None, optional
A description of this item (will be shown as a comment in the
configuration file)
cfgtype : str or None, optional
A type specifier like those used as the *values* of a particular key
in a ``configspec`` file of ``configobj``. If None, the type will be
inferred from the default value.
module : str or None, optional
The full module name that this item is associated with. The first
element (e.g. 'astropy' if this is 'astropy.config.configuration')
will be used to determine the name of the configuration file, while
the remaining items determine the section. If None, the package will be
inferred from the package within which this object's initializer is
called.
aliases : str, or list of str, optional
The deprecated location(s) of this configuration item. If the
config item is not found at the new location, it will be
searched for at all of the old locations.
Raises
------
RuntimeError
If ``module`` is `None`, but the module this item is created from
cannot be determined.
"""
# this is used to make validation faster so a Validator object doesn't
# have to be created every time
_validator = validate.Validator()
cfgtype = None
"""
A type specifier like those used as the *values* of a particular key in a
``configspec`` file of ``configobj``.
"""
rootname = "astropy"
"""
Rootname sets the base path for all config files.
"""
def __init__(
self, defaultvalue="", description=None, cfgtype=None, module=None, aliases=None
):
from astropy.utils import isiterable
if module is None:
module = find_current_module(2)
if module is None:
msg1 = "Cannot automatically determine get_config module, "
msg2 = "because it is not called from inside a valid module"
raise RuntimeError(msg1 + msg2)
else:
module = module.__name__
self.module = module
self.description = description
self.__doc__ = description
# now determine cfgtype if it is not given
if cfgtype is None:
if isiterable(defaultvalue) and not isinstance(defaultvalue, str):
# it is an options list
dvstr = [str(v) for v in defaultvalue]
cfgtype = "option(" + ", ".join(dvstr) + ")"
defaultvalue = dvstr[0]
elif isinstance(defaultvalue, bool):
cfgtype = "boolean"
elif isinstance(defaultvalue, int):
cfgtype = "integer"
elif isinstance(defaultvalue, float):
cfgtype = "float"
elif isinstance(defaultvalue, str):
cfgtype = "string"
defaultvalue = str(defaultvalue)
self.cfgtype = cfgtype
self._validate_val(defaultvalue)
self.defaultvalue = defaultvalue
if aliases is None:
self.aliases = []
elif isinstance(aliases, str):
self.aliases = [aliases]
else:
self.aliases = aliases
def __set__(self, obj, value):
return self.set(value)
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self()
def set(self, value):
"""
Sets the current value of this ``ConfigItem``.
This also updates the comments that give the description and type
information.
Parameters
----------
value
The value this item should be set to.
Raises
------
TypeError
If the provided ``value`` is not valid for this ``ConfigItem``.
"""
try:
value = self._validate_val(value)
except validate.ValidateError as e:
raise TypeError(
f"Provided value for configuration item {self.name} not valid:"
f" {e.args[0]}"
)
sec = get_config(self.module, rootname=self.rootname)
sec[self.name] = value
@contextmanager
def set_temp(self, value):
"""
Sets this item to a specified value only inside a with block.
Use as::
ITEM = ConfigItem('ITEM', 'default', 'description')
with ITEM.set_temp('newval'):
#... do something that wants ITEM's value to be 'newval' ...
print(ITEM)
# ITEM is now 'default' after the with block
Parameters
----------
value
The value to set this item to inside the with block.
"""
initval = self()
self.set(value)
try:
yield
finally:
self.set(initval)
def reload(self):
"""Reloads the value of this ``ConfigItem`` from the relevant
configuration file.
Returns
-------
val : object
The new value loaded from the configuration file.
"""
self.set(self.defaultvalue)
baseobj = get_config(self.module, True, rootname=self.rootname)
secname = baseobj.name
cobj = baseobj
# a ConfigObj's parent is itself, so we look for the parent with that
while cobj.parent is not cobj:
cobj = cobj.parent
newobj = configobj.ConfigObj(cobj.filename, interpolation=False)
if secname is not None:
if secname not in newobj:
return baseobj.get(self.name)
newobj = newobj[secname]
if self.name in newobj:
baseobj[self.name] = newobj[self.name]
return baseobj.get(self.name)
def __repr__(self):
return (
f"<{self.__class__.__name__}: name={self.name!r} value={self()!r} at"
f" 0x{id(self):x}>"
)
def __str__(self):
return "\n".join(
(
f"{self.__class__.__name__}: {self.name}",
f" cfgtype={self.cfgtype!r}",
f" defaultvalue={self.defaultvalue!r}",
f" description={self.description!r}",
f" module={self.module}",
f" value={self()!r}",
)
)
def __call__(self):
"""Returns the value of this ``ConfigItem``.
Returns
-------
val : object
This item's value, with a type determined by the ``cfgtype``
attribute.
Raises
------
TypeError
If the configuration value as stored is not this item's type.
"""
def section_name(section):
if section == "":
return "at the top-level"
else:
return f"in section [{section}]"
options = []
sec = get_config(self.module, rootname=self.rootname)
if self.name in sec:
options.append((sec[self.name], self.module, self.name))
for alias in self.aliases:
module, name = alias.rsplit(".", 1)
sec = get_config(module, rootname=self.rootname)
if "." in module:
filename, module = module.split(".", 1)
else:
filename = module
module = ""
if name in sec:
if "." in self.module:
new_module = self.module.split(".", 1)[1]
else:
new_module = ""
warn(
f"Config parameter '{name}' {section_name(module)} of the file"
f" '{get_config_filename(filename, rootname=self.rootname)}' is"
f" deprecated. Use '{self.name}'"
f" {section_name(new_module)} instead.",
AstropyDeprecationWarning,
)
options.append((sec[name], module, name))
if len(options) == 0:
self.set(self.defaultvalue)
options.append((self.defaultvalue, None, None))
if len(options) > 1:
filename, sec = self.module.split(".", 1)
warn(
f"Config parameter '{self.name}' {section_name(sec)} of the file"
f" '{get_config_filename(filename, rootname=self.rootname)}' is given"
" by more than one alias"
f" ({', '.join(['.'.join(x[1:3]) for x in options if x[1] is not None])})."
" Using the first.",
AstropyDeprecationWarning,
)
val = options[0][0]
try:
return self._validate_val(val)
except validate.ValidateError as e:
raise TypeError(f"Configuration value not valid: {e.args[0]}")
def _validate_val(self, val):
"""Validates the provided value based on cfgtype and returns the
type-cast value.
throws the underlying configobj exception if it fails
"""
# note that this will normally use the *class* attribute `_validator`,
# but if some arcane reason is needed for making a special one for an
# instance or sub-class, it will be used
return self._validator.check(self.cfgtype, val)
# this dictionary stores the primary copy of the ConfigObj's for each
# root package
_cfgobjs = {}
def get_config_filename(packageormod=None, rootname=None):
"""
Get the filename of the config file associated with the given
package or module.
"""
cfg = get_config(packageormod, rootname=rootname)
while cfg.parent is not cfg:
cfg = cfg.parent
return cfg.filename
# This is used by testing to override the config file, so we can test
# with various config files that exercise different features of the
# config system.
_override_config_file = None
def get_config(packageormod=None, reload=False, rootname=None):
"""Gets the configuration object or section associated with a particular
package or module.
Parameters
----------
packageormod : str or None
The package for which to retrieve the configuration object. If a
string, it must be a valid package name, or if ``None``, the package from
which this function is called will be used.
reload : bool, optional
Reload the file, even if we have it cached.
rootname : str or None
Name of the root configuration directory. If ``None`` and
``packageormod`` is ``None``, this defaults to be the name of
the package from which this function is called. If ``None`` and
``packageormod`` is not ``None``, this defaults to ``astropy``.
Returns
-------
cfgobj : ``configobj.ConfigObj`` or ``configobj.Section``
If the requested package is a base package, this will be the
``configobj.ConfigObj`` for that package, or if it is a subpackage or
module, it will return the relevant ``configobj.Section`` object.
Raises
------
RuntimeError
If ``packageormod`` is `None`, but the package this item is created
from cannot be determined.
"""
if packageormod is None:
packageormod = find_current_module(2)
if packageormod is None:
msg1 = "Cannot automatically determine get_config module, "
msg2 = "because it is not called from inside a valid module"
raise RuntimeError(msg1 + msg2)
else:
packageormod = packageormod.__name__
_autopkg = True
else:
_autopkg = False
packageormodspl = packageormod.split(".")
pkgname = packageormodspl[0]
secname = ".".join(packageormodspl[1:])
if rootname is None:
if _autopkg:
rootname = pkgname
else:
rootname = "astropy" # so we don't break affiliated packages
cobj = _cfgobjs.get(pkgname, None)
if cobj is None or reload:
cfgfn = None
try:
# This feature is intended only for use by the unit tests
if _override_config_file is not None:
cfgfn = _override_config_file
else:
cfgfn = path.join(get_config_dir(rootname=rootname), pkgname + ".cfg")
cobj = configobj.ConfigObj(cfgfn, interpolation=False)
except OSError:
# This can happen when HOME is not set
cobj = configobj.ConfigObj(interpolation=False)
# This caches the object, so if the file becomes accessible, this
# function won't see it unless the module is reloaded
_cfgobjs[pkgname] = cobj
if secname: # not the root package
if secname not in cobj:
cobj[secname] = {}
return cobj[secname]
else:
return cobj
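# Illustrative usage sketch for ``get_config`` (normally the ``ConfigItem``
# descriptors are used instead of reading sections directly; a value is only
# present if it has been set in the user's configuration file):
#
#     sec = get_config('astropy.utils.console')   # configobj Section for the subpackage
#     sec.get('use_color')                         # raw stored value, or None if unset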
def generate_config(pkgname="astropy", filename=None, verbose=False):
"""Generates a configuration file, from the list of `ConfigItem`
objects for each subpackage.
.. versionadded:: 4.1
Parameters
----------
pkgname : str or None
The package for which to retrieve the configuration object.
filename : str or file-like or None
If None, the default configuration path is taken from `get_config`.
"""
if verbose:
verbosity = nullcontext
filter_warnings = AstropyDeprecationWarning
else:
verbosity = silence
filter_warnings = Warning
package = importlib.import_module(pkgname)
with verbosity(), warnings.catch_warnings():
warnings.simplefilter("ignore", category=filter_warnings)
for mod in pkgutil.walk_packages(
path=package.__path__, prefix=package.__name__ + "."
):
if mod.module_finder.path.endswith(("test", "tests")) or mod.name.endswith(
"setup_package"
):
# Skip test and setup_package modules
continue
if mod.name.split(".")[-1].startswith("_"):
# Skip private modules
continue
with contextlib.suppress(ImportError):
importlib.import_module(mod.name)
wrapper = TextWrapper(initial_indent="## ", subsequent_indent="## ", width=78)
if filename is None:
filename = get_config_filename(pkgname)
with contextlib.ExitStack() as stack:
if isinstance(filename, (str, os.PathLike)):
fp = stack.enter_context(open(filename, "w"))
else:
# assume it's a file object, or io.StringIO
fp = filename
# Parse the subclasses, ordered by their module name
subclasses = ConfigNamespace.__subclasses__()
processed = set()
for conf in sorted(subclasses, key=lambda x: x.__module__):
mod = conf.__module__
# Skip modules for other packages, e.g. astropy modules that
# would be imported when running the function for astroquery.
if mod.split(".")[0] != pkgname:
continue
# Check that modules are not processed twice, which can happen
# when they are imported in another module.
if mod in processed:
continue
else:
processed.add(mod)
print_module = True
for item in conf().values():
if print_module:
# If this is the first item of the module, we print the
# module name, but not if this is the root package...
if item.module != pkgname:
modname = item.module.replace(f"{pkgname}.", "")
fp.write(f"[{modname}]\n\n")
print_module = False
fp.write(wrapper.fill(item.description) + "\n")
if isinstance(item.defaultvalue, (tuple, list)):
if len(item.defaultvalue) == 0:
fp.write(f"# {item.name} = ,\n\n")
elif len(item.defaultvalue) == 1:
fp.write(f"# {item.name} = {item.defaultvalue[0]},\n\n")
else:
fp.write(
f"# {item.name} ="
f' {",".join(map(str, item.defaultvalue))}\n\n'
)
else:
fp.write(f"# {item.name} = {item.defaultvalue}\n\n")
def reload_config(packageormod=None, rootname=None):
"""Reloads configuration settings from a configuration file for the root
package of the requested package/module.
This overwrites any changes that may have been made in `ConfigItem`
objects. This applies for any items that are based on this file, which
is determined by the *root* package of ``packageormod``
(e.g. ``'astropy.cfg'`` for the ``'astropy.config.configuration'``
module).
Parameters
----------
packageormod : str or None
The package or module name - see `get_config` for details.
rootname : str or None
Name of the root configuration directory - see `get_config`
for details.
"""
sec = get_config(packageormod, True, rootname=rootname)
# look for the section that is its own parent - that's the base object
while sec.parent is not sec:
sec = sec.parent
sec.reload()
def is_unedited_config_file(content, template_content=None):
"""
Determines if a config file can be safely replaced because it doesn't
actually contain any meaningful content, i.e. if it contains only comments
or is completely empty.
"""
buffer = io.StringIO(content)
raw_cfg = configobj.ConfigObj(buffer, interpolation=True)
# If any of the items is set, return False
return not any(len(v) > 0 for v in raw_cfg.values())
# This function is no longer used by astropy, but it is kept for other
# packages that may use it (e.g. astroquery). It should be removed at some
# point.
# this is not in __all__ because it's not intended that a user uses it
@deprecated("5.0")
def update_default_config(pkg, default_cfg_dir_or_fn, version=None, rootname="astropy"):
"""
Checks if the configuration file for the specified package exists,
and if not, copy over the default configuration. If the
configuration file looks like it has already been edited, we do
not write over it, but instead write a file alongside it named
``pkg.version.cfg`` as a "template" for the user.
Parameters
----------
pkg : str
The package to be updated.
default_cfg_dir_or_fn : str
The filename or directory name where the default configuration file is.
If a directory name, ``'pkg.cfg'`` will be used in that directory.
version : str, optional
The current version of the given package. If not provided, it will
be obtained from ``pkg.__version__``.
rootname : str
Name of the root configuration directory.
Returns
-------
updated : bool
If the profile was updated, `True`, otherwise `False`.
Raises
------
AttributeError
        If the version number of the package could not be determined.
"""
if path.isdir(default_cfg_dir_or_fn):
default_cfgfn = path.join(default_cfg_dir_or_fn, pkg + ".cfg")
else:
default_cfgfn = default_cfg_dir_or_fn
if not path.isfile(default_cfgfn):
# There is no template configuration file, which basically
# means the affiliated package is not using the configuration
# system, so just return.
return False
cfgfn = get_config(pkg, rootname=rootname).filename
with open(default_cfgfn, encoding="latin-1") as fr:
template_content = fr.read()
doupdate = False
if cfgfn is not None:
if path.exists(cfgfn):
with open(cfgfn, encoding="latin-1") as fd:
content = fd.read()
identical = content == template_content
if not identical:
doupdate = is_unedited_config_file(content, template_content)
elif path.exists(path.dirname(cfgfn)):
doupdate = True
identical = False
if version is None:
version = resolve_name(pkg, "__version__")
# Don't install template files for dev versions, or we'll end up
# spamming `~/.astropy/config`.
if version and "dev" not in version and cfgfn is not None:
template_path = path.join(
get_config_dir(rootname=rootname), f"{pkg}.{version}.cfg"
)
needs_template = not path.exists(template_path)
else:
needs_template = False
if doupdate or needs_template:
if needs_template:
with open(template_path, "w", encoding="latin-1") as fw:
fw.write(template_content)
# If we just installed a new template file and we can't
# update the main configuration file because it has user
# changes, display a warning.
if not identical and not doupdate:
warn(
f"The configuration options in {pkg} {version} may have changed, "
"your configuration file was not updated in order to "
"preserve local changes. A new configuration template "
f"has been saved to '{template_path}'.",
ConfigurationChangedWarning,
)
if doupdate and not identical:
with open(cfgfn, "w", encoding="latin-1") as fw:
fw.write(template_content)
return True
return False
def create_config_file(pkg, rootname="astropy", overwrite=False):
"""
Create the default configuration file for the specified package.
If the file already exists, it is updated only if it has not been
modified. Otherwise the ``overwrite`` flag is needed to overwrite it.
Parameters
----------
pkg : str
The package to be updated.
rootname : str
Name of the root configuration directory.
overwrite : bool
Force updating the file if it already exists.
Returns
-------
updated : bool
If the profile was updated, `True`, otherwise `False`.
"""
# local import to prevent using the logger before it is configured
from astropy.logger import log
cfgfn = get_config_filename(pkg, rootname=rootname)
# generate the default config template
template_content = io.StringIO()
generate_config(pkg, template_content)
template_content.seek(0)
template_content = template_content.read()
doupdate = True
# if the file already exists, check that it has not been modified
if cfgfn is not None and path.exists(cfgfn):
with open(cfgfn, encoding="latin-1") as fd:
content = fd.read()
doupdate = is_unedited_config_file(content, template_content)
if doupdate or overwrite:
with open(cfgfn, "w", encoding="latin-1") as fw:
fw.write(template_content)
log.info(f"The configuration file has been successfully written to {cfgfn}")
return True
elif not doupdate:
log.warning(
"The configuration file already exists and seems to "
"have been customized, so it has not been updated. "
"Use overwrite=True if you really want to update it."
)
return False
|
1dc4d52d0bd47909fc36a44ebaeaf6fc0da45e522d041b8251d6ee785fc74909 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains functions to determine where configuration and
data/cache files used by Astropy should be placed.
"""
import os
import shutil
import sys
from functools import wraps
__all__ = ["get_config_dir", "get_cache_dir", "set_temp_config", "set_temp_cache"]
def _find_home():
"""Locates and return the home directory (or best approximation) on this
system.
Raises
------
OSError
        If the home directory cannot be located, which usually means you are
        running Astropy on some obscure platform that doesn't have standard
        home directories.
"""
try:
homedir = os.path.expanduser("~")
except Exception:
# Linux, Unix, AIX, OS X
if os.name == "posix":
if "HOME" in os.environ:
homedir = os.environ["HOME"]
else:
raise OSError(
"Could not find unix home directory to search for "
"astropy config dir"
)
elif os.name == "nt": # This is for all modern Windows (NT or after)
if "MSYSTEM" in os.environ and os.environ.get("HOME"):
# Likely using an msys shell; use whatever it is using for its
# $HOME directory
homedir = os.environ["HOME"]
# See if there's a local home
elif "HOMEDRIVE" in os.environ and "HOMEPATH" in os.environ:
homedir = os.path.join(os.environ["HOMEDRIVE"], os.environ["HOMEPATH"])
# Maybe a user profile?
elif "USERPROFILE" in os.environ:
homedir = os.path.join(os.environ["USERPROFILE"])
else:
try:
import winreg as wreg
shell_folders = r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders)
homedir = wreg.QueryValueEx(key, "Personal")[0]
key.Close()
except Exception:
# As a final possible resort, see if HOME is present
if "HOME" in os.environ:
homedir = os.environ["HOME"]
else:
raise OSError(
"Could not find windows home directory to "
"search for astropy config dir"
)
else:
# for other platforms, try HOME, although it probably isn't there
if "HOME" in os.environ:
homedir = os.environ["HOME"]
else:
raise OSError(
"Could not find a home directory to search for "
"astropy config dir - are you on an unsupported "
"platform?"
)
return homedir
def get_config_dir(rootname="astropy"):
"""
Determines the package configuration directory name and creates the
directory if it doesn't exist.
This directory is typically ``$HOME/.astropy/config``, but if the
XDG_CONFIG_HOME environment variable is set and the
``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Parameters
----------
rootname : str
Name of the root configuration directory. For example, if ``rootname =
'pkgname'``, the configuration directory would be ``<home>/.pkgname/``
rather than ``<home>/.astropy`` (depending on platform).
Returns
-------
configdir : str
The absolute path to the configuration directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_config, that overrides all
if set_temp_config._temp_path is not None:
xch = set_temp_config._temp_path
config_path = os.path.join(xch, rootname)
if not os.path.exists(config_path):
os.mkdir(config_path)
return os.path.abspath(config_path)
# first look for XDG_CONFIG_HOME
xch = os.environ.get("XDG_CONFIG_HOME")
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, rootname)
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_root_dir("config", linkto, rootname))
def get_cache_dir(rootname="astropy"):
"""
Determines the Astropy cache directory name and creates the directory if it
doesn't exist.
This directory is typically ``$HOME/.astropy/cache``, but if the
XDG_CACHE_HOME environment variable is set and the
``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Parameters
----------
rootname : str
Name of the root cache directory. For example, if
``rootname = 'pkgname'``, the cache directory will be
``<cache>/.pkgname/``.
Returns
-------
cachedir : str
The absolute path to the cache directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_cache, that overrides all
if set_temp_cache._temp_path is not None:
xch = set_temp_cache._temp_path
cache_path = os.path.join(xch, rootname)
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return os.path.abspath(cache_path)
# first look for XDG_CACHE_HOME
xch = os.environ.get("XDG_CACHE_HOME")
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, rootname)
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_root_dir("cache", linkto, rootname))
class _SetTempPath:
_temp_path = None
_default_path_getter = None
def __init__(self, path=None, delete=False):
if path is not None:
path = os.path.abspath(path)
self._path = path
self._delete = delete
self._prev_path = self.__class__._temp_path
def __enter__(self):
self.__class__._temp_path = self._path
try:
return self._default_path_getter("astropy")
except Exception:
self.__class__._temp_path = self._prev_path
raise
def __exit__(self, *args):
self.__class__._temp_path = self._prev_path
if self._delete and self._path is not None:
shutil.rmtree(self._path)
def __call__(self, func):
"""Implements use as a decorator."""
@wraps(func)
def wrapper(*args, **kwargs):
with self:
func(*args, **kwargs)
return wrapper
class set_temp_config(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy config, primarily
for use with testing.
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the config path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy config
files, or create them if they do not already exist. If None, this
restores the config path to the user's default config path as returned
by `get_config_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_config_dir)
def __enter__(self):
# Special case for the config case, where we need to reset all the
# cached config objects. We do keep the cache, since some of it
# may have been set programmatically rather than be stored in the
# config file (e.g., iers.conf.auto_download=False for our tests).
from .configuration import _cfgobjs
self._cfgobjs_copy = _cfgobjs.copy()
_cfgobjs.clear()
return super().__enter__()
def __exit__(self, *args):
from .configuration import _cfgobjs
_cfgobjs.clear()
_cfgobjs.update(self._cfgobjs_copy)
del self._cfgobjs_copy
super().__exit__(*args)
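# Illustrative usage sketch for ``set_temp_config`` in a test, as a context
# manager or as a decorator (``tmp_path`` is assumed to be a pytest temporary
# directory fixture):
#
#     def test_uses_isolated_config(tmp_path):
#         with set_temp_config(tmp_path):
#             assert get_config_dir().startswith(str(tmp_path))
#
#     @set_temp_config('/tmp/astropy-test-config', delete=True)
#     def run_with_isolated_config():
#         ...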
class set_temp_cache(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy download cache,
primarily for use with testing (though there may be other applications
for setting a different cache directory, for example to switch to a cache
dedicated to large files).
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the cache path
just within that function.
Parameters
----------
path : str
The directory (which must exist) in which to find the Astropy cache
files, or create them if they do not already exist. If None, this
restores the cache path to the user's default cache path as returned
by `get_cache_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_cache_dir)
def _find_or_create_root_dir(dirnm, linkto, pkgname="astropy"):
innerdir = os.path.join(_find_home(), f".{pkgname}")
maindir = os.path.join(_find_home(), f".{pkgname}", dirnm)
if not os.path.exists(maindir):
# first create .astropy dir if needed
if not os.path.exists(innerdir):
try:
os.mkdir(innerdir)
except OSError:
if not os.path.isdir(innerdir):
raise
elif not os.path.isdir(innerdir):
raise OSError(
f"Intended {pkgname} {dirnm} directory {maindir} is actually a file."
)
try:
os.mkdir(maindir)
except OSError:
if not os.path.isdir(maindir):
raise
if (
not sys.platform.startswith("win")
and linkto is not None
and not os.path.exists(linkto)
):
os.symlink(maindir, linkto)
elif not os.path.isdir(maindir):
raise OSError(
f"Intended {pkgname} {dirnm} directory {maindir} is actually a file."
)
return os.path.abspath(maindir)
|
f1d27eac270d4ea6dc04944ed53c5d1c9adf4c40b399a8f984db9d96028571ed | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
# pylint: disable=invalid-name
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
from importlib.metadata import entry_points
import numpy as np
from astropy.units import Quantity
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyUserWarning
from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex
from .spline import (
SplineExactKnotsFitter,
SplineInterpolateFitter,
SplineSmoothingFitter,
SplineSplrepFitter,
)
from .statistic import leastsquare
from .utils import _combine_equivalency_dict, poly_map_domain
__all__ = [
"LinearLSQFitter",
"LevMarLSQFitter",
"TRFLSQFitter",
"DogBoxLSQFitter",
"LMLSQFitter",
"FittingWithOutlierRemoval",
"SLSQPLSQFitter",
"SimplexLSQFitter",
"JointFitter",
"Fitter",
"ModelLinearityError",
"ModelsError",
"SplineExactKnotsFitter",
"SplineInterpolateFitter",
"SplineSmoothingFitter",
"SplineSplrepFitter",
]
# Statistic functions implemented in `astropy.modeling.statistic.py`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers.py`
OPTIMIZERS = [Simplex, SLSQP]
class NonFiniteValueError(RuntimeError):
"""
    Error raised when attempting to fit a non-finite value.
"""
class Covariance:
"""Class for covariance matrix calculated by fitter."""
def __init__(self, cov_matrix, param_names):
self.cov_matrix = cov_matrix
self.param_names = param_names
def pprint(self, max_lines, round_val):
# Print and label lower triangle of covariance matrix
# Print rows for params up to `max_lines`, round floats to 'round_val'
longest_name = max(len(x) for x in self.param_names)
ret_str = "parameter variances / covariances \n"
fstring = f'{"": <{longest_name}}| {{0}}\n'
for i, row in enumerate(self.cov_matrix):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += fstring.replace(" " * len(param), param, 1).format(
repr(np.round(row[: i + 1], round_val))[7:-2]
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, params):
# index covariance matrix by parameter names or indices
if len(params) != 2:
raise ValueError("Covariance must be indexed by two values.")
if all(isinstance(item, str) for item in params):
i1, i2 = self.param_names.index(params[0]), self.param_names.index(
params[1]
)
elif all(isinstance(item, int) for item in params):
i1, i2 = params
else:
raise TypeError(
"Covariance can be indexed by two parameter names or integer indices."
)
return self.cov_matrix[i1][i2]
class StandardDeviations:
"""Class for fitting uncertainties."""
def __init__(self, cov_matrix, param_names):
self.param_names = param_names
self.stds = self._calc_stds(cov_matrix)
def _calc_stds(self, cov_matrix):
        # sometimes scipy lstsq returns nonsensical negative values in the
        # diagonals of the cov_x it computes.
stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)]
return stds
def pprint(self, max_lines, round_val):
longest_name = max(len(x) for x in self.param_names)
ret_str = "standard deviations\n"
for i, std in enumerate(self.stds):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += (
f"{param}{' ' * (longest_name - len(param))}| "
f"{np.round(std, round_val)}\n"
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, param):
if isinstance(param, str):
i = self.param_names.index(param)
elif isinstance(param, int):
i = param
else:
raise TypeError(
"Standard deviation can be indexed by parameter name or integer."
)
return self.stds[i]
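# Illustrative usage sketch: fitters in this module attach a ``Covariance``
# instance as ``model.cov_matrix`` and a ``StandardDeviations`` instance as
# ``model.stds`` when uncertainty calculation is enabled; both can be indexed
# by parameter name (the data values are arbitrary):
#
#     from astropy.modeling import models, fitting
#     fit = fitting.LinearLSQFitter(calc_uncertainties=True)
#     line = fit(models.Linear1D(), [1., 2., 3., 4.], [2.1, 3.9, 6.2, 7.8])
#     line.cov_matrix['slope', 'intercept']    # one covariance entry
#     line.stds['slope']                       # one standard deviation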
class ModelsError(Exception):
"""Base class for model exceptions."""
class ModelLinearityError(ModelsError):
"""Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith("_"):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop("equivalencies", None)
data_has_units = (
isinstance(x, Quantity)
or isinstance(y, Quantity)
or isinstance(z, Quantity)
)
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies
)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(
model.input_units[model.inputs[0]],
equivalencies=input_units_equivalencies[model.inputs[0]],
)
if isinstance(y, Quantity) and z is not None:
y = y.to(
model.input_units[model.inputs[1]],
equivalencies=input_units_equivalencies[model.inputs[1]],
)
# Create a dictionary mapping the real model inputs and outputs
# names to the data. This remapping of names must be done here, after
# the input data is converted to the correct units.
rename_data = {model.inputs[0]: x}
if z is not None:
rename_data[model.outputs[0]] = z
rename_data[model.inputs[1]] = y
else:
rename_data[model.outputs[0]] = y
rename_data["z"] = None
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
                # input units (to make sure that initial guesses on the
                # parameters are in the right unit system).
model = model.without_units_for_data(**rename_data)
if isinstance(model, tuple):
rename_data["_left_kwargs"] = model[1]
rename_data["_right_kwargs"] = model[2]
model = model[0]
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(**rename_data)
return model_new
else:
raise NotImplementedError(
"This model does not support being fit to data with units."
)
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
"""
supported_constraints = []
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific for
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@staticmethod
def _add_fitting_uncertainties(*args):
"""
        When available, calculate and set the parameter covariance matrix
(model.cov_matrix) and standard deviations (model.stds).
"""
return None
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
"""
supported_constraints = ["fixed"]
supports_masked_input = True
def __init__(self, calc_uncertainties=False):
self.fit_info = {
"residuals": None,
"rank": None,
"singular_values": None,
"params": None,
}
self._calc_uncertainties = calc_uncertainties
@staticmethod
def _is_invertible(m):
"""Check if inverse of matrix can be obtained."""
if m.shape[0] != m.shape[1]:
return False
if np.linalg.matrix_rank(m) < m.shape[0]:
return False
return True
def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None, resids=None):
"""
        Calculate the parameter covariance matrix and standard deviations
        and set the `cov_matrix` and `stds` attributes.
"""
x_dot_x_prime = np.dot(a.T, a)
masked = False or hasattr(y, "mask")
# check if invertible. if not, can't calc covariance.
if not self._is_invertible(x_dot_x_prime):
return model
inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime)
if z is None: # 1D models
if len(model) == 1: # single model
mask = None
if masked:
mask = y.mask
xx = np.ma.array(x, mask=mask)
RSS = [(1 / (xx.count() - n_coeff)) * resids]
if len(model) > 1: # model sets
RSS = [] # collect sum residuals squared for each model in set
for j in range(len(model)):
mask = None
if masked:
mask = y.mask[..., j].flatten()
xx = np.ma.array(x, mask=mask)
eval_y = model(xx, model_set_axis=False)
eval_y = np.rollaxis(eval_y, model.model_set_axis)[j]
RSS.append(
(1 / (xx.count() - n_coeff)) * np.sum((y[..., j] - eval_y) ** 2)
)
else: # 2D model
if len(model) == 1:
mask = None
if masked:
warnings.warn(
"Calculation of fitting uncertainties "
"for 2D models with masked values not "
"currently supported.\n",
AstropyUserWarning,
)
return
xx, _ = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask)
# len(xx) instead of xx.count. this will break if values are masked?
RSS = [(1 / (len(xx) - n_coeff)) * resids]
else:
RSS = []
for j in range(len(model)):
eval_z = model(x, y, model_set_axis=False)
mask = None # need to figure out how to deal w/ masking here.
if model.model_set_axis == 1:
# model_set_axis passed when evaluating only refers to input shapes
# so output must be reshaped for model_set_axis=1.
eval_z = np.rollaxis(eval_z, 1)
eval_z = eval_z[j]
RSS.append(
[(1 / (len(x) - n_coeff)) * np.sum((z[j] - eval_z) ** 2)]
)
covs = [inv_x_dot_x_prime * r for r in RSS]
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
if len(covs) == 1:
model.cov_matrix = Covariance(covs[0], model.param_names)
model.stds = StandardDeviations(covs[0], free_param_names)
else:
model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs]
model.stds = [StandardDeviations(cov, free_param_names) for cov in covs]
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, "domain") and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, "window") and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, "x_domain") and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, "y_domain") and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, "x_window") and model.x_window is None:
model.x_window = [-1.0, 1.0]
if hasattr(model, "y_window") and model.y_window is None:
model.y_window = [-1.0, 1.0]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like, optional
Input coordinates.
If the dependent (``y`` or ``z``) coordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
coordinate grids differ.
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError(
"Model is not linear in parameters, "
"linear fit methods should not be used."
)
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
model_copy.sync_constraints = False
_, fitparam_indices, _ = model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(
x, y, z, n_models=len(model_copy), model_set_axis=model_copy.model_set_axis
)
n_fixed = sum(model_copy.fixed.values())
        # This is also done by _convert_input, but we need it here to allow
# checking the array dimensionality before that gets called:
if weights is not None:
weights = np.asarray(weights, dtype=float)
if n_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [
idx
for idx in range(len(model_copy.param_names))
if idx not in fitparam_indices
]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray(
[
getattr(model_copy, model_copy.param_names[idx]).value
for idx in fixparam_indices
]
)
if len(farg) == 2:
x, y = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, weights = _convert_input(
x,
weights,
n_models=len(model_copy) if weights.ndim == y.ndim else 1,
model_set_axis=model_copy.model_set_axis,
)
# map domain into window
if hasattr(model_copy, "domain"):
x = self._map_domain_window(model_copy, x)
if n_fixed:
lhs = np.asarray(
self._deriv_with_constraints(model_copy, fitparam_indices, x=x)
)
fixderivs = self._deriv_with_constraints(
model_copy, fixparam_indices, x=x
)
else:
lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, _, weights = _convert_input(
x,
y,
weights,
n_models=len(model_copy) if weights.ndim == z.ndim else 1,
model_set_axis=model_copy.model_set_axis,
)
# map domain into window
if hasattr(model_copy, "x_domain"):
x, y = self._map_domain_window(model_copy, x, y)
if n_fixed:
lhs = np.asarray(
self._deriv_with_constraints(model_copy, fitparam_indices, x=x, y=y)
)
fixderivs = self._deriv_with_constraints(
model_copy, fixparam_indices, x=x, y=y
)
else:
lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
if weights is not None:
# Same for weights
if weights.ndim > 2:
# Separate 2D weights for each model:
weights = np.rollaxis(weights, model_axis, weights.ndim)
weights = weights.reshape(-1, weights.shape[-1])
elif weights.ndim == z.ndim:
# Separate, flattened weights for each model:
weights = weights.T if model_axis == 0 else weights
else:
# Common weights for all the models:
weights = weights.flatten()
else:
rhs = z.flatten()
if weights is not None:
weights = weights.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if np.asanyarray(lhs).ndim > 2:
raise ValueError(
f"{type(model_copy).__name__} gives unsupported >2D "
"derivative matrix for this x/y"
)
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if n_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input coordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
if rhs.ndim == 2:
if weights.shape == rhs.shape:
                    # Separate weights for each model: broadcast lhs with an
                    # extra (model) dimension
lhs = lhs[..., np.newaxis] * weights[:, np.newaxis]
rhs = rhs * weights
else:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original
# dependent variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if weights is not None and not masked and np.any(np.isnan(lhs)):
raise ValueError(
"Found NaNs in the coefficient matrix, which "
"should not happen and would crash the lapack "
"routine. Maybe check that weights are not null."
)
        a = None  # needed for calculating the covariance matrix
if (masked and len(model_copy) > 1) or (
weights is not None and weights.ndim > 1
):
# Separate masks or weights for multiple models case: Numpy's
# lstsq supports multiple dimensions only for rhs, so we need to
# loop manually on the models. This may be fixed in the future
# with https://github.com/numpy/numpy/pull/15777.
# Initialize empty array of coefficients and populate it one model
# at a time. The shape matches the number of coefficients from the
# Vandermonde matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype)
# Arrange the lhs as a stack of 2D matrices that we can iterate
# over to get the correctly-orientated lhs for each model:
if lhs.ndim > 2:
lhs_stack = np.rollaxis(lhs, -1, 0)
else:
lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask if masked else slice(None)
model_lhs = model_lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
a = model_lhs
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(
model_lhs, model_rhs, rcond
)
model_lacoef[:] = t_coef.T
else:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
a = lhs[good]
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good], rhs[good], rcond)
self.fit_info["residuals"] = resids
self.fit_info["rank"] = rank
self.fit_info["singular_values"] = sval
lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl
self.fit_info["params"] = lacoef
fitter_to_model_params(model_copy, lacoef.flatten())
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if (
hasattr(model_copy, "_order")
and len(model_copy) == 1
and rank < (model_copy._order - n_fixed)
):
warnings.warn("The fit may be poorly conditioned\n", AstropyUserWarning)
# calculate and set covariance matrix and standard devs. on model
if self._calc_uncertainties:
if len(y) > len(lacoef):
self._add_fitting_uncertainties(
model_copy, a * scl, len(lacoef), x, y, z, resids
)
model_copy.sync_constraints = True
return model_copy
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a maximum number of iterations ``niter``, outliers are
removed and fitting is performed for each iteration, until no new outliers
are found or ``niter`` is reached.
Parameters
----------
fitter : `Fitter`
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : callable
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int, optional
Maximum number of iterations.
outlier_kwargs : dict, optional
Keyword arguments for outlier_func.
Attributes
----------
fit_info : dict
The ``fit_info`` (if any) from the last iteration of the wrapped
``fitter`` during the most recent fit. An entry is also added with the
keyword ``niter`` that records the actual number of fitting iterations
performed (as opposed to the user-specified maximum).
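    Examples
    --------
    A minimal sketch of iterative sigma clipping around a straight-line fit
    (values are illustrative; `~astropy.stats.sigma_clip` is assumed as the
    outlier function)::

        import numpy as np
        from astropy.modeling import models, fitting
        from astropy.stats import sigma_clip

        x = np.linspace(0, 10, 50)
        y = 2.0 * x + 1.0
        y[10] += 100.0  # one artificial outlier

        fit = fitting.FittingWithOutlierRemoval(
            fitting.LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
        )
        fitted_model, mask = fit(models.Polynomial1D(degree=1), x, y)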
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
self.fit_info = {"niter": None}
def __str__(self):
return (
f"Fitter: {self.fitter.__class__.__name__}\n"
f"Outlier function: {self.outlier_func.__name__}\n"
f"Num. of iterations: {self.niter}\n"
f"Outlier func. args.: {self.outlier_kwargs}"
)
def __repr__(self):
return (
f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, "
f"outlier_func: {self.outlier_func.__name__},"
f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})"
)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like, optional
Data measurements (2D case).
weights : array-like, optional
Weights to be passed to the fitter.
kwargs : dict, optional
Keyword arguments to be passed to the fitter.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behavior and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y coordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if (
not hasattr(self.fitter, "supports_masked_input")
or self.fitter.supports_masked_input is not True
):
raise ValueError(
f"{type(self.fitter).__name__} cannot fit model sets with masked "
"values"
)
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input coordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = (x,)
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if "axis" not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs["axis"] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
last_n_masked = filtered_data.mask.sum()
n = 0 # (allow recording no. of iterations when 0)
# Perform the iterative fitting:
for n in range(1, self.niter + 1):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop("axis", None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True,
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask, model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(
data_T, mask_T, model_vals_T
):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn(
"outlier_func did not accept axis argument; "
"reverted to slow loop over models.",
AstropyUserWarning,
)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(
fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights,
**kwargs,
)
else:
fitted_model = self.fitter(
fitted_model,
*coords,
filtered_data,
weights=filtered_weights,
**kwargs,
)
# Stop iteration if the masked points are no longer changing (with
# cumulative rejection we only need to compare how many there are):
this_n_masked = filtered_data.mask.sum() # (minimal overhead)
if this_n_masked == last_n_masked:
break
last_n_masked = this_n_masked
self.fit_info = {"niter": n}
self.fit_info.update(getattr(self.fitter, "fit_info", {}))
return fitted_model, filtered_data.mask
class _NonLinearLSQFitter(metaclass=_FitterMeta):
"""
Base class for Non-Linear least-squares fitters.
Parameters
----------
calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
Default: False
    use_min_max_bounds : bool
        If True, the parameter bounds set on the model are enforced for each
        bounded parameter while fitting, via a simple min/max clip.
Default: True
"""
supported_constraints = ["fixed", "tied", "bounds"]
"""
The constraint types supported by this fitter type.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=True):
self.fit_info = None
self._calc_uncertainties = calc_uncertainties
self._use_min_max_bounds = use_min_max_bounds
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
fitter_to_model_params(model, fps, self._use_min_max_bounds)
meas = args[-1]
if weights is None:
value = np.ravel(model(*args[2:-1]) - meas)
else:
value = np.ravel(weights * (model(*args[2:-1]) - meas))
if not np.all(np.isfinite(value)):
raise NonFiniteValueError(
"Objective function has encountered a non-finite value, "
"this will cause the fit to fail!\n"
"Please remove non-finite values from your input data before "
"fitting to avoid this error."
)
return value
@staticmethod
def _add_fitting_uncertainties(model, cov_matrix):
"""
Set ``cov_matrix`` and ``stds`` attributes on model with parameter
covariance matrix returned by ``optimize.leastsq``.
"""
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
model.cov_matrix = Covariance(cov_matrix, free_param_names)
model.stds = StandardDeviations(cov_matrix, free_param_names)
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array(
[np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)]
)
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars], True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
fit_deriv = np.array(model.fit_deriv(x, *params))
try:
output = np.array(
[np.ravel(_) for _ in np.array(weights) * fit_deriv]
)
if output.shape != fit_deriv.shape:
output = np.array(
[np.ravel(_) for _ in np.atleast_2d(weights).T * fit_deriv]
)
return output
except ValueError:
return np.array(
[
np.ravel(_)
for _ in np.array(weights) * np.moveaxis(fit_deriv, -1, 0)
]
).transpose()
else:
if not model.col_fit_deriv:
return [
np.ravel(_)
for _ in (
np.ravel(weights)
* np.array(model.fit_deriv(x, y, *params)).T
).T
]
return [
np.ravel(_)
for _ in weights * np.array(model.fit_deriv(x, y, *params))
]
def _compute_param_cov(self, model, y, init_values, cov_x, fitparams, farg):
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg) ** 2)
dof = len(y) - len(init_values)
self.fit_info["param_cov"] = cov_x * sum_sqrs / dof
else:
self.fit_info["param_cov"] = None
if self._calc_uncertainties is True:
if self.fit_info["param_cov"] is not None:
self._add_fitting_uncertainties(model, self.fit_info["param_cov"])
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
return None, None, None
def _filter_non_finite(self, x, y, z=None):
"""
Filter out non-finite values in x, y, z.
Returns
-------
x, y, z : ndarrays
x, y, and z with non-finite values filtered out.
"""
MESSAGE = "Non-Finite input data has been removed by the fitter."
if z is None:
mask = np.isfinite(y)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
return x[mask], y[mask], None
else:
mask = np.isfinite(z)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
return x[mask], y[mask], z[mask]
@fitter_unit_support
def __call__(
self,
model,
x,
y,
z=None,
weights=None,
maxiter=DEFAULT_MAXITER,
acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS,
estimate_jacobian=False,
filter_non_finite=False,
):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
        x : array
            Input coordinates
        y : array
            Input coordinates (2-D fits) or data measurements (1-D fits)
        z : array, optional
            Data measurements (2-D fits)
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
filter_non_finite : bool, optional
Whether or not to filter data with non-finite values. Default is False
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self.supported_constraints)
model_copy.sync_constraints = False
if filter_non_finite:
x, y, z = self._filter_non_finite(x, y, z)
farg = (
model_copy,
weights,
) + _convert_input(x, y, z)
init_values, fitparams, cov_x = self._run_fitter(
model_copy, farg, maxiter, acc, epsilon, estimate_jacobian
)
self._compute_param_cov(model_copy, y, init_values, cov_x, fitparams, farg)
        model_copy.sync_constraints = True
return model_copy
class LevMarLSQFitter(_NonLinearLSQFitter):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
Additionally, one additional element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
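    Examples
    --------
    A minimal sketch of a Gaussian fit (illustrative values; requires scipy)::

        import numpy as np
        from astropy.modeling import models, fitting

        x = np.linspace(-5, 5, 200)
        y = 3.0 * np.exp(-0.5 * (x - 1.0) ** 2 / 0.5**2)

        fitter = fitting.LevMarLSQFitter(calc_uncertainties=True)
        fitted = fitter(models.Gaussian1D(amplitude=1, mean=0, stddev=1), x, y)
        cov = fitter.fit_info["param_cov"]  # parameter covariance matrix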
"""
def __init__(self, calc_uncertainties=False):
super().__init__(calc_uncertainties)
self.fit_info = {
"nfev": None,
"fvec": None,
"fjac": None,
"ipvt": None,
"qtf": None,
"message": None,
"ierr": None,
"param_jac": None,
"param_cov": None,
}
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
if model.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _, _ = model_to_fit_params(model)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function,
init_values,
args=farg,
Dfun=dfunc,
col_deriv=model.col_fit_deriv,
maxfev=maxiter,
epsfcn=epsilon,
xtol=acc,
full_output=True,
)
fitter_to_model_params(model, fitparams)
self.fit_info.update(dinfo)
self.fit_info["cov_x"] = cov_x
self.fit_info["message"] = mess
self.fit_info["ierr"] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn(
"The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning,
)
return init_values, fitparams, cov_x
class _NLLSQFitter(_NonLinearLSQFitter):
"""
Wrapper class for `scipy.optimize.least_squares` method, which provides:
- Trust Region Reflective
- dogbox
        - Levenberg-Marquardt
algorithms using the least squares statistic.
Parameters
----------
method : str
‘trf’ : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
‘dogbox’ : dogleg algorithm with rectangular trust regions, typical
use case is small problems with bounds. Not recommended for
problems with rank-deficient Jacobian.
‘lm’ : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn’t handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
Default: False
    use_min_max_bounds : bool
        If True, the parameter bounds set on the model are enforced for each
        bounded parameter while fitting, via a simple min/max clip. This
        replicates how `LevMarLSQFitter` enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
"""
def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__(calc_uncertainties, use_min_max_bounds)
self._method = method
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
from scipy.linalg import svd
if model.fit_deriv is None or estimate_jacobian:
dfunc = "2-point"
else:
def _dfunc(params, model, weights, x, y, z=None):
if model.col_fit_deriv:
return np.transpose(
self._wrap_deriv(params, model, weights, x, y, z)
)
else:
return self._wrap_deriv(params, model, weights, x, y, z)
dfunc = _dfunc
init_values, _, bounds = model_to_fit_params(model)
# Note, if use_min_max_bounds is True we are defaulting to enforcing bounds
# using the old method employed by LevMarLSQFitter, this is different
# from the method that optimize.least_squares employs to enforce bounds
# thus we override the bounds being passed to optimize.least_squares so
# that it will not enforce any bounding.
if self._use_min_max_bounds:
bounds = (-np.inf, np.inf)
self.fit_info = optimize.least_squares(
self.objective_function,
init_values,
args=farg,
jac=dfunc,
max_nfev=maxiter,
diff_step=np.sqrt(epsilon),
xtol=acc,
method=self._method,
bounds=bounds,
)
# Adapted from ~scipy.optimize.minpack, see:
# https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(self.fit_info.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[: s.size]
cov_x = np.dot(VT.T / s**2, VT)
fitter_to_model_params(model, self.fit_info.x, False)
if not self.fit_info.success:
warnings.warn(
f"The fit may be unsuccessful; check: \n {self.fit_info.message}",
AstropyUserWarning,
)
return init_values, self.fit_info.x, cov_x
class TRFLSQFitter(_NLLSQFitter):
"""
Trust Region Reflective algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
Default: False
    use_min_max_bounds : bool
        If True, the parameter bounds set on the model are enforced for each
        bounded parameter while fitting, via a simple min/max clip. This
        replicates how `LevMarLSQFitter` enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
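    Examples
    --------
    A minimal sketch of a fit with parameter bounds (illustrative values;
    requires scipy)::

        import numpy as np
        from astropy.modeling import models, fitting

        x = np.linspace(-5, 5, 200)
        y = 3.0 * np.exp(-0.5 * (x - 1.0) ** 2 / 0.5**2)

        g_init = models.Gaussian1D(
            amplitude=1, mean=0, stddev=1, bounds={"mean": (-2.0, 2.0)}
        )
        fitted = fitting.TRFLSQFitter()(g_init, x, y)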
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("trf", calc_uncertainties, use_min_max_bounds)
class DogBoxLSQFitter(_NLLSQFitter):
"""
DogBox algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
Default: False
    use_min_max_bounds : bool
        If True, the parameter bounds set on the model are enforced for each
        bounded parameter while fitting, via a simple min/max clip. This
        replicates how `LevMarLSQFitter` enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("dogbox", calc_uncertainties, use_min_max_bounds)
class LMLSQFitter(_NLLSQFitter):
"""
`scipy.optimize.least_squares` Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False):
super().__init__("lm", calc_uncertainties, True)
class SLSQPLSQFitter(Fitter):
"""
Sequential Least Squares Programming (SLSQP) optimization algorithm and
least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
Notes
-----
See also the `~astropy.modeling.optimizers.SLSQP` optimizer.
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
`ModelLinearityError`
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
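    Examples
    --------
    A minimal sketch of fitting two Gaussians that share their width
    (illustrative values; requires scipy)::

        import numpy as np
        from astropy.modeling import models
        from astropy.modeling.fitting import JointFitter

        g1 = models.Gaussian1D(1.0, 0.0, 0.5)
        g2 = models.Gaussian1D(2.0, 3.0, 0.5)
        jf = JointFitter([g1, g2], {g1: ["stddev"], g2: ["stddev"]}, [0.4])

        x = np.linspace(-3, 6, 200)
        # coordinate/data pairs are passed per model, in order:
        jf(x, g1(x), x, g2(x))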
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self.model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = model.parameters.tolist()
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]["slice"]
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
            the fitted parameters, i.e. the result of one iteration of the
fitting algorithm
args : dict
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[: model.n_inputs + 1]
del lstsqargs[: model.n_inputs + 1]
# separate each model separately fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]["slice"]
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
            raise TypeError(f"Expected more than one model, {len(self.models)} given")
if len(self.jointparams.keys()) < 2:
            raise TypeError(
                "At least two parameters are expected, "
                f"{len(self.jointparams.keys())} given"
            )
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError(
f"{len(self.jointparams[j])} parameter(s) "
f"provided but {len(self.initvals)} expected"
)
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError(
f"Expected {reduce(lambda x, y: x + 1 + y + 1, self.modeldims)} "
f"coordinates in args but {len(args)} provided"
)
self.fitparams[:], _ = optimize.leastsq(
self.objective_function, self.fitparams, args=args
)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]["slice"]
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
data_ndim, data_shape = z.ndim, z.shape
else:
data_ndim, data_shape = y.ndim, y.shape
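    # Illustrative example (not exhaustive): for two 1-D models stacked along
    # model_set_axis=0, y arrives with shape (2, npoints) and is rolled below
    # to (npoints, 2), i.e. one column per model, which is the layout that
    # np.linalg.lstsq expects for multiple right-hand sides.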
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1 or data_ndim > x.ndim:
if (model_set_axis or 0) >= data_ndim:
raise ValueError("model_set_axis out of range")
if data_shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y or z array) is expected to equal "
"the number of parameter sets"
)
if z is None:
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
data_shape = y.shape[:-1]
else:
# Shape of z excluding model_set_axis
data_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1 :]
if z is None:
if data_shape != x.shape:
raise ValueError("x and y should have the same shape")
farg = (x, y)
else:
if not (x.shape == y.shape == data_shape):
raise ValueError("x, y and z should have the same shape")
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def fitter_to_model_params(model, fps, use_min_max_bounds=True):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
Parameters
----------
model :
The model being fit
fps :
The fit parameter values to be assigned
    use_min_max_bounds : bool
        If True, the bounds set on the model are enforced on each bounded
        parameter.
Default: True
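    Examples
    --------
    A minimal sketch (illustrative): with ``mean`` held fixed, only the free
    parameter values are supplied, in model order::

        from astropy.modeling import models
        from astropy.modeling.fitting import fitter_to_model_params

        g = models.Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0,
                              fixed={"mean": True})
        fitter_to_model_params(g, [2.0, 0.5])
        # g.parameters is now [2.0, 0.0, 0.5]; the fixed mean is untouched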
"""
_, fit_param_indices, _ = model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
parameters = model.parameters
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]["slice"]
shape = param_metrics[name]["shape"]
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset : offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None) and use_min_max_bounds:
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
parameters[slice_] = values
offset += size
# Update model parameters before calling ``tied`` constraints.
model._array_to_parameters()
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
# better to change this at some point
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]["slice"]
# To handle multiple tied constraints, model parameters
# need to be updated after each iteration.
parameters[slice_] = value
model._array_to_parameters()
@deprecated("5.1", "private method: _fitter_to_model_params has been made public now")
def _fitter_to_model_params(model, fps):
return fitter_to_model_params(model, fps)
def model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
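    Examples
    --------
    A minimal sketch (illustrative)::

        from astropy.modeling import models
        from astropy.modeling.fitting import model_to_fit_params

        g = models.Gaussian1D(amplitude=2.0, mean=1.0, stddev=0.5,
                              fixed={"mean": True})
        params, indices, bounds = model_to_fit_params(g)
        # params  -> array([2. , 0.5])   (the fixed "mean" is removed)
        # indices -> [0, 2]              (positions of the free parameters)
        # bounds  -> ((low, ...), (high, ...)) with None mapped to -inf/+inf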
"""
fitparam_indices = list(range(len(model.param_names)))
model_params = model.parameters
model_bounds = list(model.bounds.values())
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model_params)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]["slice"]
del params[slice_]
del model_bounds[slice_]
del fitparam_indices[idx]
model_params = np.array(params)
for idx, bound in enumerate(model_bounds):
if bound[0] is None:
lower = -np.inf
else:
lower = bound[0]
if bound[1] is None:
upper = np.inf
else:
upper = bound[1]
model_bounds[idx] = (lower, upper)
model_bounds = tuple(zip(*model_bounds))
return model_params, fitparam_indices, model_bounds
@deprecated("5.1", "private method: _model_to_fit_params has been made public now")
def _model_to_fit_params(model):
return model_to_fit_params(model)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = "Optimizer cannot handle {0} constraints."
if any(model.fixed.values()) and "fixed" not in supported_constraints:
raise UnsupportedConstraintError(message.format("fixed parameter"))
if any(model.tied.values()) and "tied" not in supported_constraints:
raise UnsupportedConstraintError(message.format("tied parameter"))
if (
any(tuple(b) != (None, None) for b in model.bounds.values())
and "bounds" not in supported_constraints
):
raise UnsupportedConstraintError(message.format("bound parameter"))
if model.eqcons and "eqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("equality"))
if model.ineqcons and "ineqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("inequality"))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn(
"Model is linear in parameters; consider using linear fitting methods.",
AstropyUserWarning,
)
elif len(model) != 1:
        # for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
This provides a means of inserting a fitting routine without requirement
of it being merged into astropy's core.
Parameters
----------
entry_points : list of `~importlib.metadata.EntryPoint`
entry_points are objects which encapsulate importable objects and
are defined on the installation of a package.
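        For example, a package could advertise a fitter through its packaging
        metadata (illustrative ``pyproject.toml`` snippet)::

            [project.entry-points."astropy.modeling"]
            MyFitter = "my_package.fitting:MyFitter"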
Notes
-----
An explanation of entry points can be found `here
<http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(
AstropyUserWarning(
f"{type(e).__name__} error occurred in entry point {name}."
)
)
else:
if not inspect.isclass(entry_point):
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to be a Class."
)
)
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to extend "
"astropy.modeling.Fitter"
)
)
def _populate_ep():
# TODO: Exclusively use select when Python minversion is 3.10
ep = entry_points()
if hasattr(ep, "select"):
populate_entry_points(ep.select(group="astropy.modeling"))
else:
populate_entry_points(ep.get("astropy.modeling", []))
_populate_ep()
35a57bf20da1418a7b3a4d99c8a7bb79677e6887c3c1de24845243711391e370
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
This module defines classes that deal with parameters.
It is unlikely users will need to work with these classes directly,
unless they define their own models.
"""
import functools
import numbers
import operator
import numpy as np
from astropy.units import MagUnit, Quantity
from astropy.utils import isiterable
from .utils import array_repr_oneline, get_inputs_and_params
__all__ = ["Parameter", "InputParameterError", "ParameterError"]
class ParameterError(Exception):
"""Generic exception class for all exceptions pertaining to Parameters."""
class InputParameterError(ValueError, ParameterError):
"""Used for incorrect input parameter values and definitions."""
class ParameterDefinitionError(ParameterError):
"""Exception in declaration of class-level Parameters."""
def _tofloat(value):
"""Convert a parameter to float or float array."""
if isiterable(value):
try:
value = np.asanyarray(value, dtype=float)
except (TypeError, ValueError):
# catch arrays with strings or user errors like different
# types of parameters in a parameter set
raise InputParameterError(
f"Parameter of {type(value)} could not be converted to float"
)
elif isinstance(value, Quantity):
# Quantities are fine as is
pass
elif isinstance(value, np.ndarray):
# A scalar/dimensionless array
value = float(value.item())
elif isinstance(value, (numbers.Number, np.number)) and not isinstance(value, bool):
value = float(value)
elif isinstance(value, bool):
raise InputParameterError(
"Expected parameter to be of numerical type, not boolean"
)
else:
raise InputParameterError(
f"Don't know how to convert parameter of {type(value)} to float"
)
return value
# Helpers for implementing operator overloading on Parameter
def _binary_arithmetic_operation(op, reflected=False):
@functools.wraps(op)
def wrapper(self, val):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
if reflected:
return op(val, self_value)
else:
return op(self_value, val)
return wrapper
def _binary_comparison_operation(op):
@functools.wraps(op)
def wrapper(self, val):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value, val)
return wrapper
def _unary_arithmetic_operation(op):
@functools.wraps(op)
def wrapper(self):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value)
return wrapper
class Parameter:
"""
Wraps individual parameters.
Since 4.0 Parameters are no longer descriptors and are based on a new
    implementation of the Parameter class. Parameters now (as of 4.0) store
    their values locally, instead of in the associated model as they did
    previously.
This class represents a model's parameter (in a somewhat broad sense). It
serves a number of purposes:
1) A type to be recognized by models and treated specially at class
initialization (i.e., if it is found that there is a class definition
of a Parameter, the model initializer makes a copy at the instance level).
2) Managing the handling of allowable parameter values and once defined,
ensuring updates are consistent with the Parameter definition. This
includes the optional use of units and quantities as well as transforming
values to an internally consistent representation (e.g., from degrees to
radians through the use of getters and setters).
3) Holding attributes of parameters relevant to fitting, such as whether
the parameter may be varied in fitting, or whether there are constraints
that must be satisfied.
See :ref:`astropy:modeling-parameters` for more details.
Parameters
----------
name : str
parameter name
.. warning::
The fact that `Parameter` accepts ``name`` as an argument is an
implementation detail, and should not be used directly. When
defining a new `Model` class, parameter names are always
automatically defined by the class attribute they're assigned to.
description : str
parameter description
default : float or array
default value to use for this parameter
unit : `~astropy.units.Unit`
if specified, the parameter will be in these units, and when the
parameter is updated in future, it should be set to a
:class:`~astropy.units.Quantity` that has equivalent units.
getter : callable
a function that wraps the raw (internal) value of the parameter
when returning the value through the parameter proxy (eg. a
parameter may be stored internally as radians but returned to the
user as degrees)
setter : callable
a function that wraps any values assigned to this parameter; should
be the inverse of getter
fixed : bool
if True the parameter is not varied during fitting
tied : callable or False
if callable is supplied it provides a way to link the value of this
parameter to another parameter (or some other arbitrary function)
min : float
the lower bound of a parameter
max : float
the upper bound of a parameter
bounds : tuple
specify min and max as a single tuple--bounds may not be specified
simultaneously with min or max
mag : bool
Specify if the unit of the parameter can be a Magnitude unit or not
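    Examples
    --------
    A minimal sketch of declaring parameters on a custom model (illustrative)::

        from astropy.modeling import Fittable1DModel, Parameter

        class Line(Fittable1DModel):
            slope = Parameter(default=1.0, description="Gradient")
            intercept = Parameter(default=0.0)

            @staticmethod
            def evaluate(x, slope, intercept):
                return slope * x + intercept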
"""
constraints = ("fixed", "tied", "bounds")
"""
Types of constraints a parameter can have. Excludes 'min' and 'max'
which are just aliases for the first and second elements of the 'bounds'
constraint (which is represented as a 2-tuple). 'prior' and 'posterior'
are available for use by user fitters but are not used by any built-in
fitters as of this writing.
"""
def __init__(
self,
name="",
description="",
default=None,
unit=None,
getter=None,
setter=None,
fixed=False,
tied=False,
min=None,
max=None,
bounds=None,
prior=None,
posterior=None,
mag=False,
):
super().__init__()
self._model = None
self._model_required = False
self._setter = self._create_value_wrapper(setter, None)
self._getter = self._create_value_wrapper(getter, None)
self._name = name
self.__doc__ = self._description = description.strip()
# We only need to perform this check on unbound parameters
if isinstance(default, Quantity):
if unit is not None and not unit.is_equivalent(default.unit):
raise ParameterDefinitionError(
f"parameter default {default} does not have units equivalent to "
f"the required unit {unit}"
)
unit = default.unit
default = default.value
self._default = default
self._mag = mag
self._set_unit(unit, force=True)
# Internal units correspond to raw_units held by the model in the
# previous implementation. The private _getter and _setter methods
# use this to convert to and from the public unit defined for the
# parameter.
self._internal_unit = None
if not self._model_required:
if self._default is not None:
self.value = self._default
else:
self._value = None
# NOTE: These are *default* constraints--on model instances constraints
# are taken from the model if set, otherwise the defaults set here are
# used
if bounds is not None:
if min is not None or max is not None:
raise ValueError(
"bounds may not be specified simultaneously with min or "
f"max when instantiating Parameter {name}"
)
else:
bounds = (min, max)
self._fixed = fixed
self._tied = tied
self._bounds = bounds
self._order = None
self._validator = None
self._prior = prior
self._posterior = posterior
self._std = None
def __set_name__(self, owner, name):
self._name = name
def __len__(self):
val = self.value
if val.shape == ():
return 1
else:
return val.shape[0]
def __getitem__(self, key):
value = self.value
if len(value.shape) == 0:
# Wrap the value in a list so that getitem can work for sensible
# indices like [0] and [-1]
value = [value]
return value[key]
def __setitem__(self, key, value):
# Get the existing value and check whether it even makes sense to
# apply this index
oldvalue = self.value
if isinstance(key, slice):
if len(oldvalue[key]) == 0:
raise InputParameterError(
"Slice assignment outside the parameter dimensions for "
f"'{self.name}'"
)
for idx, val in zip(range(*key.indices(len(self))), value):
self.__setitem__(idx, val)
else:
try:
oldvalue[key] = value
except IndexError:
                raise InputParameterError(
                    f"Input dimension {key} invalid for {self.name!r} parameter "
                    f"with dimension {len(self)}"
                )
def __repr__(self):
args = f"'{self._name}'"
args += f", value={self.value}"
if self.unit is not None:
args += f", unit={self.unit}"
for cons in self.constraints:
val = getattr(self, cons)
if val not in (None, False, (None, None)):
# Maybe non-obvious, but False is the default for the fixed and
# tied constraints
args += f", {cons}={val}"
return f"{self.__class__.__name__}({args})"
@property
def name(self):
"""Parameter name."""
return self._name
@property
def default(self):
"""Parameter default value."""
return self._default
@property
def value(self):
"""The unadorned value proxied by this parameter."""
if self._getter is None and self._setter is None:
return np.float64(self._value)
else:
            # This new implementation uses internal_unit in place of the
            # raw_unit used previously. The distinction is between the public
            # unit that the parameter advertises and the unit it actually
            # uses internally.
if self.internal_unit:
return np.float64(
self._getter(
self._internal_value, self.internal_unit, self.unit
).value
)
elif self._getter:
return np.float64(self._getter(self._internal_value))
elif self._setter:
return np.float64(self._internal_value)
@value.setter
def value(self, value):
if isinstance(value, Quantity):
            raise TypeError(
                "The .value property on parameters should be set"
                " to unitless values, not Quantity objects. To set"
                " a parameter to a quantity simply set the"
                " parameter directly without using .value"
            )
if self._setter is None:
self._value = np.array(value, dtype=np.float64)
else:
self._internal_value = np.array(self._setter(value), dtype=np.float64)
@property
def unit(self):
"""
The unit attached to this parameter, if any.
On unbound parameters (i.e. parameters accessed through the
model class, rather than a model instance) this is the required/
default unit for the parameter.
"""
return self._unit
@unit.setter
def unit(self, unit):
if self.unit is None:
raise ValueError(
"Cannot attach units to parameters that were "
"not initially specified with units"
)
else:
raise ValueError(
"Cannot change the unit attribute directly, "
"instead change the parameter to a new quantity"
)
def _set_unit(self, unit, force=False):
if force:
if isinstance(unit, MagUnit) and not self._mag:
                raise ValueError(
                    "This parameter does not support magnitude units such as"
                    f" {unit}"
                )
self._unit = unit
else:
self.unit = unit
@property
def internal_unit(self):
"""
Return the internal unit the parameter uses for the internal value stored.
"""
return self._internal_unit
@internal_unit.setter
def internal_unit(self, internal_unit):
"""
Set the unit the parameter will convert the supplied value to the
representation used internally.
"""
self._internal_unit = internal_unit
@property
def quantity(self):
"""
This parameter, as a :class:`~astropy.units.Quantity` instance.
"""
if self.unit is None:
return None
return self.value * self.unit
@quantity.setter
def quantity(self, quantity):
if not isinstance(quantity, Quantity):
raise TypeError(
"The .quantity attribute should be set to a Quantity object"
)
self.value = quantity.value
self._set_unit(quantity.unit, force=True)
@property
def shape(self):
"""The shape of this parameter's value array."""
if self._setter is None:
return self._value.shape
return self._internal_value.shape
@shape.setter
def shape(self, value):
if isinstance(self.value, np.generic):
if value not in ((), (1,)):
raise ValueError("Cannot assign this shape to a scalar quantity")
else:
self.value.shape = value
@property
def size(self):
"""The size of this parameter's value array."""
return np.size(self.value)
@property
def std(self):
"""Standard deviation, if available from fit."""
return self._std
@std.setter
def std(self, value):
self._std = value
@property
    def prior(self):
        """Prior for this parameter; available for use by user-defined fitters."""
        return self._prior
@prior.setter
def prior(self, val):
self._prior = val
@property
    def posterior(self):
        """Posterior for this parameter; available for use by user-defined fitters."""
        return self._posterior
@posterior.setter
def posterior(self, val):
self._posterior = val
@property
def fixed(self):
"""
Boolean indicating if the parameter is kept fixed during fitting.
"""
return self._fixed
@fixed.setter
def fixed(self, value):
"""Fix a parameter."""
if not isinstance(value, bool):
raise ValueError("Value must be boolean")
self._fixed = value
@property
def tied(self):
"""
Indicates that this parameter is linked to another one.
A callable which provides the relationship of the two parameters.
"""
return self._tied
@tied.setter
def tied(self, value):
"""Tie a parameter."""
if not callable(value) and value not in (False, None):
raise TypeError("Tied must be a callable or set to False or None")
self._tied = value
@property
def bounds(self):
"""The minimum and maximum values of a parameter as a tuple."""
return self._bounds
@bounds.setter
def bounds(self, value):
"""Set the minimum and maximum values of a parameter from a tuple."""
_min, _max = value
if _min is not None:
if not isinstance(_min, (numbers.Number, Quantity)):
raise TypeError("Min value must be a number or a Quantity")
if isinstance(_min, Quantity):
_min = float(_min.value)
else:
_min = float(_min)
if _max is not None:
if not isinstance(_max, (numbers.Number, Quantity)):
raise TypeError("Max value must be a number or a Quantity")
if isinstance(_max, Quantity):
_max = float(_max.value)
else:
_max = float(_max)
self._bounds = (_min, _max)
@property
def min(self):
"""A value used as a lower bound when fitting a parameter."""
return self.bounds[0]
@min.setter
def min(self, value):
"""Set a minimum value of a parameter."""
self.bounds = (value, self.max)
@property
def max(self):
"""A value used as an upper bound when fitting a parameter."""
return self.bounds[1]
@max.setter
def max(self, value):
"""Set a maximum value of a parameter."""
self.bounds = (self.min, value)
@property
def validator(self):
"""
Used as a decorator to set the validator method for a `Parameter`.
The validator method validates any value set for that parameter.
It takes two arguments--``self``, which refers to the `Model`
instance (remember, this is a method defined on a `Model`), and
the value being set for this parameter. The validator method's
return value is ignored, but it may raise an exception if the value
set on the parameter is invalid (typically an `InputParameterError`
should be raised, though this is not currently a requirement).
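A minimal sketch of the decorator usage (``MyLine`` and its ``slope``
parameter are hypothetical)::

    import numpy as np
    from astropy.modeling import Fittable1DModel, InputParameterError, Parameter

    class MyLine(Fittable1DModel):
        slope = Parameter(default=1)

        @slope.validator
        def slope(self, value):
            # reject a zero slope anywhere in the (possibly array-valued) value
            if np.any(value == 0):
                raise InputParameterError("slope must be non-zero")

        @staticmethod
        def evaluate(x, slope):
            return slope * x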
"""
def validator(func, self=self):
if callable(func):
self._validator = func
return self
else:
raise ValueError(
"This decorator method expects a callable.\n"
"The use of this method as a direct validator is\n"
"deprecated; use the new validate method instead\n"
)
return validator
def validate(self, value):
"""Run the validator on this parameter."""
if self._validator is not None and self._model is not None:
self._validator(self._model, value)
def copy(
self,
name=None,
description=None,
default=None,
unit=None,
getter=None,
setter=None,
fixed=False,
tied=False,
min=None,
max=None,
bounds=None,
prior=None,
posterior=None,
):
"""
Make a copy of this `Parameter`, overriding any of its core attributes
in the process (or an exact copy).
The arguments to this method are the same as those for the `Parameter`
initializer. This simply returns a new `Parameter` instance with any
or all of the attributes overridden, and so returns the equivalent of:
.. code:: python
Parameter(self.name, self.description, ...)
"""
kwargs = locals().copy()
del kwargs["self"]
for key, value in kwargs.items():
if value is None:
# Annoying special cases for min/max, which are just aliases for
# the components of bounds
if key in ("min", "max"):
continue
else:
if hasattr(self, key):
value = getattr(self, key)
elif hasattr(self, "_" + key):
value = getattr(self, "_" + key)
kwargs[key] = value
return self.__class__(**kwargs)
@property
def model(self):
"""Return the model this parameter is associated with."""
return self._model
@model.setter
def model(self, value):
self._model = value
self._setter = self._create_value_wrapper(self._setter, value)
self._getter = self._create_value_wrapper(self._getter, value)
if self._model_required:
if self._default is not None:
self.value = self._default
else:
self._value = None
@property
def _raw_value(self):
"""
Currently for internal use only.
Like Parameter.value but does not pass the result through
Parameter.getter. By design this should only be used from bound
parameters.
This will probably be removed or reworked at some point in the
process of rethinking how parameter values are stored/updated.
"""
if self._setter:
return self._internal_value
return self.value
def _create_value_wrapper(self, wrapper, model):
"""Wraps a getter/setter function to support optionally passing in
a reference to the model object as the second argument.
If a model is tied to this parameter and its getter/setter supports
a second argument then this creates a partial function using the model
instance as the second argument.
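For example, a parameter stored internally as its logarithm could be declared
(a minimal sketch) as::

    flux = Parameter(default=1, getter=np.exp, setter=np.log)

Both ufuncs take a single input, so no partial function is created in that
case.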
"""
if isinstance(wrapper, np.ufunc):
if wrapper.nin != 1:
raise TypeError(
"A numpy.ufunc used for Parameter "
"getter/setter may only take one input "
"argument"
)
elif wrapper is None:
# Just allow non-wrappers to fall through silently, for convenience
return None
else:
inputs, _ = get_inputs_and_params(wrapper)
nargs = len(inputs)
if nargs == 1:
pass
elif nargs == 2:
self._model_required = True
if model is not None:
# Don't make a partial function unless we're tied to a
# specific model instance
model_arg = inputs[1].name
wrapper = functools.partial(wrapper, **{model_arg: model})
else:
raise TypeError(
"Parameter getter/setter must be a function "
"of either one or two arguments"
)
return wrapper
def __array__(self, dtype=None):
# Make np.asarray(self) work a little more straightforwardly
arr = np.asarray(self.value, dtype=dtype)
if self.unit is not None:
arr = Quantity(arr, self.unit, copy=False, subok=True)
return arr
def __bool__(self):
return bool(np.all(self.value))
__add__ = _binary_arithmetic_operation(operator.add)
__radd__ = _binary_arithmetic_operation(operator.add, reflected=True)
__sub__ = _binary_arithmetic_operation(operator.sub)
__rsub__ = _binary_arithmetic_operation(operator.sub, reflected=True)
__mul__ = _binary_arithmetic_operation(operator.mul)
__rmul__ = _binary_arithmetic_operation(operator.mul, reflected=True)
__pow__ = _binary_arithmetic_operation(operator.pow)
__rpow__ = _binary_arithmetic_operation(operator.pow, reflected=True)
__truediv__ = _binary_arithmetic_operation(operator.truediv)
__rtruediv__ = _binary_arithmetic_operation(operator.truediv, reflected=True)
__eq__ = _binary_comparison_operation(operator.eq)
__ne__ = _binary_comparison_operation(operator.ne)
__lt__ = _binary_comparison_operation(operator.lt)
__gt__ = _binary_comparison_operation(operator.gt)
__le__ = _binary_comparison_operation(operator.le)
__ge__ = _binary_comparison_operation(operator.ge)
__neg__ = _unary_arithmetic_operation(operator.neg)
__abs__ = _unary_arithmetic_operation(operator.abs)
def param_repr_oneline(param):
"""
Like array_repr_oneline but works on `Parameter` objects and supports
rendering parameters with units like quantities.
"""
out = array_repr_oneline(param.value)
if param.unit is not None:
out = f"{out} {param.unit!s}"
return out
784c5bb89c8729fe0e9148b9b09feb10249c44275fdec2e0e65a958597a84ddd
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
# pylint: disable=invalid-name, protected-access, redefined-outer-name
import abc
import copy
import functools
import inspect
import itertools
import operator
import types
from collections import defaultdict, deque
from inspect import signature
from itertools import chain
import numpy as np
from astropy.nddata.utils import add_array, extract_array
from astropy.table import Table
from astropy.units import Quantity, UnitsError, dimensionless_unscaled
from astropy.units.utils import quantity_asanyarray
from astropy.utils import (
IncompatibleShapeError,
check_broadcast,
find_current_module,
indent,
isiterable,
metadata,
sharedmethod,
)
from astropy.utils.codegen import make_function_with_signature
from .bounding_box import CompoundBoundingBox, ModelBoundingBox
from .parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline
from .utils import (
_combine_equivalency_dict,
_ConstraintsDict,
_SpecialOperatorsDict,
combine_labels,
get_inputs_and_params,
make_binary_operator_eval,
)
__all__ = [
"Model",
"FittableModel",
"Fittable1DModel",
"Fittable2DModel",
"CompoundModel",
"fix_inputs",
"custom_model",
"ModelDefinitionError",
"bind_bounding_box",
"bind_compound_bounding_box",
]
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
"""
return lambda left, right: CompoundModel(oper, left, right, **kwargs)
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions."""
class _ModelMeta(abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
def __new__(mcls, name, bases, members, **kwds):
# See the docstring for _is_dynamic above
if "_is_dynamic" not in members:
members["_is_dynamic"] = mcls._is_dynamic
opermethods = [
("__add__", _model_oper("+")),
("__sub__", _model_oper("-")),
("__mul__", _model_oper("*")),
("__truediv__", _model_oper("/")),
("__pow__", _model_oper("**")),
("__or__", _model_oper("|")),
("__and__", _model_oper("&")),
("_fix_inputs", _model_oper("fix_inputs")),
]
members["_parameters_"] = {
k: v for k, v in members.items() if isinstance(v, Parameter)
}
for opermethod, opercall in opermethods:
members[opermethod] = opercall
cls = super().__new__(mcls, name, bases, members, **kwds)
param_names = list(members["_parameters_"])
# Need to walk each base MRO to collect all parameter names
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
# Preserve order of definitions
param_names = list(tbase._parameters_) + param_names
# Remove duplicates (arising from redefinition in subclass).
param_names = list(dict.fromkeys(param_names))
if cls._parameters_:
if hasattr(cls, "_param_names"):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(param_names)
else:
cls.param_names = tuple(param_names)
return cls
def __init__(cls, name, bases, members, **kwds):
super().__init__(name, bases, members, **kwds)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
pdict = {}
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
for parname, val in cls._parameters_.items():
pdict[parname] = val
cls._handle_special_methods(members, pdict)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith("_abc_"):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ("__init__", "__call__"):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
internal implementation detail (i.e. begins with '_').
"""
return not (cls.__name__.startswith("_") or inspect.isabstract(cls))
def rename(cls, name=None, inputs=None, outputs=None):
"""
Creates a copy of this model class with a new name, inputs or outputs.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class 'astropy.modeling.core.SkyRotation'>
Name: SkyRotation (Rotation2D)
N_inputs: 2
N_outputs: 2
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
if name is None:
name = cls.name
if inputs is None:
inputs = cls.inputs
else:
if not isinstance(inputs, tuple):
raise TypeError("Expected 'inputs' to be a tuple of strings.")
elif len(inputs) != len(cls.inputs):
raise ValueError(f"{cls.name} expects {len(cls.inputs)} inputs")
if outputs is None:
outputs = cls.outputs
else:
if not isinstance(outputs, tuple):
raise TypeError("Expected 'outputs' to be a tuple of strings.")
elif len(outputs) != len(cls.outputs):
raise ValueError(f"{cls.name} expects {len(cls.outputs)} outputs")
new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs})
new_cls.__module__ = modname
new_cls.__qualname__ = name
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get("inverse")
if inverse is None or cls.__bases__[0] is object:
# The latter clause is to prevent the below code from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get("bounding_box")
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = ModelBoundingBox.validate(
cls, bounding_box, _preserve_ignore=True
)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of ModelBoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
f"The bounding_box method for {cls.name} is not correctly "
"defined: If defined as a method all arguments to that "
"method (besides self) must be keyword arguments with "
"default values that can be used to compute a default "
"bounding box."
)
kwargs.append((param.name, param.default))
__call__.__signature__ = sig
return type(
f"{cls.name}ModelBoundingBox", (ModelBoundingBox,), {"__call__": __call__}
)
def _handle_special_methods(cls, members, pdict):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, "__qualname__"):
wrapper.__qualname__ = f"{cls.__qualname__}.{wrapper.__name__}"
if (
"__call__" not in members
and "n_inputs" in members
and isinstance(members["n_inputs"], int)
and members["n_inputs"] > 0
):
# Don't create a custom __call__ for classes that already have one
# explicitly defined (this includes the Model base class, and any
# other classes that manually override __call__)
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
# When called, models can take two optional keyword arguments:
#
# * model_set_axis, which indicates (for multi-dimensional input)
# which axis is used to indicate different models
#
# * equivalencies, a dictionary of equivalencies to be applied to
# the input values, where each key should correspond to one of
# the inputs.
#
# The following code creates the __call__ function with these
# two keyword arguments.
args = ("self",)
kwargs = {
"model_set_axis": None,
"with_bounding_box": False,
"fill_value": np.nan,
"equivalencies": None,
"inputs_map": None,
}
new_call = make_function_with_signature(
__call__, args, kwargs, varargs="inputs", varkwargs="new_inputs"
)
# The following makes it look like __call__
# was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if (
"__init__" not in members
and not inspect.isabstract(cls)
and cls._parameters_
):
# Build list of all parameters including inherited ones
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional
# arguments
if all(p.default is not None for p in pdict.values()):
args = ("self",)
kwargs = []
for param_name, param_val in pdict.items():
default = param_val.default
unit = param_val.unit
# If the unit was specified in the parameter but the
# default is not a Quantity, attach the unit to the
# default.
if unit is not None:
default = Quantity(default, unit, copy=False, subok=True)
kwargs.append((param_name, default))
else:
args = ("self",) + tuple(pdict.keys())
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs="kwargs"
)
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper("+")
__sub__ = _model_oper("-")
__mul__ = _model_oper("*")
__truediv__ = _model_oper("/")
__pow__ = _model_oper("**")
__or__ = _model_oper("|")
__and__ = _model_oper("&")
_fix_inputs = _model_oper("fix_inputs")
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif inspect.isabstract(base) or base.__name__.startswith("_"):
break
bases.append(base.name)
if bases:
return f"{cls.name} ({' -> '.join(bases)})"
return cls.name
try:
default_keywords = [
("Name", format_inheritance(cls)),
("N_inputs", cls.n_inputs),
("N_outputs", cls.n_outputs),
]
if cls.param_names:
default_keywords.append(("Fittable parameters", cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append(f"{keyword}: {value}")
return "\n".join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
sets. Also, ``model_set_axis=False`` can be used to indicate that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
`~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
List of functions of length n such that ``ieqcons[j](x0, *args) >=
0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
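Model sets (a minimal sketch; two Gaussians stored in a single model object):

>>> g = models.Gaussian1D(amplitude=[10, 20], mean=[1, 2], stddev=[.1, .2],
...                       n_models=2)
>>> len(g)
2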
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ("eqcons", "ineqcons")
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
n_inputs = 0
"""The number of inputs."""
n_outputs = 0
""" The number of outputs."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
_has_inverse_bounding_box = False
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
# Covariance matrix can be set by fitter if available.
# If cov_matrix is available, then std will set as well
_cov_matrix = None
_stds = None
def __init_subclass__(cls, **kwargs):
super().__init_subclass__()
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
self._default_inputs_outputs()
if meta is not None:
self.meta = meta
self._name = name
# add parameters to instance level by walking MRO list
mro = self.__class__.__mro__
for cls in mro:
if issubclass(cls, Model):
for parname, val in cls._parameters_.items():
newpar = copy.deepcopy(val)
newpar.model = self
if parname not in self.__dict__:
self.__dict__[parname] = newpar
self._initialize_constraints(kwargs)
kwargs = self._initialize_setters(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_slices()
self._initialize_unit_support()
def _default_inputs_outputs(self):
if self.n_inputs == 1 and self.n_outputs == 1:
self._inputs = ("x",)
self._outputs = ("y",)
elif self.n_inputs == 2 and self.n_outputs == 1:
self._inputs = ("x", "y")
self._outputs = ("z",)
else:
try:
self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs))
self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs))
except TypeError:
# self.n_inputs and self.n_outputs are properties
# This is the case when subclasses of Model do not define
# ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``.
self._inputs = ()
self._outputs = ()
def _initialize_setters(self, kwargs):
"""
This exists to inject defaults for settable properties for models
originating from `custom_model`.
"""
if hasattr(self, "_settable_properties"):
setters = {
name: kwargs.pop(name, default)
for name, default in self._settable_properties.items()
}
for name, value in setters.items():
setattr(self, name, value)
return kwargs
@property
def inputs(self):
return self._inputs
@inputs.setter
def inputs(self, val):
if len(val) != self.n_inputs:
raise ValueError(
f"Expected {self.n_inputs} number of inputs, got {len(val)}."
)
self._inputs = val
self._initialize_unit_support()
@property
def outputs(self):
return self._outputs
@outputs.setter
def outputs(self, val):
if len(val) != self.n_outputs:
raise ValueError(
f"Expected {self.n_outputs} number of outputs, got {len(val)}."
)
self._outputs = val
@property
def n_inputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``inputs`` as class variables is removed.
if hasattr(self.__class__, "n_inputs") and isinstance(
self.__class__.n_inputs, property
):
try:
return len(self.__class__.inputs)
except TypeError:
try:
return len(self.inputs)
except AttributeError:
return 0
return self.__class__.n_inputs
@property
def n_outputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``outputs`` as class variables is removed.
if hasattr(self.__class__, "n_outputs") and isinstance(
self.__class__.n_outputs, property
):
try:
return len(self.__class__.outputs)
except TypeError:
try:
return len(self.outputs)
except AttributeError:
return 0
return self.__class__.n_outputs
def _calculate_separability_matrix(self):
"""
This is a hook which customises the behavior of modeling.separable.
This allows complex subclasses to customise the separability matrix.
If it returns `NotImplemented` the default behavior is used.
"""
return NotImplemented
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {
key: self._input_units_strict for key in self.inputs
}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {
key: self._input_units_allow_dimensionless for key in self.inputs
}
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that parameter.
"""
val = self._input_units_strict
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
val = self._input_units_allow_dimensionless
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def uses_quantity(self):
"""
True if this model has been created with `~astropy.units.Quantity`
objects or if there are no parameters.
This can be used to determine if this model should be evaluated with
`~astropy.units.Quantity` or regular floats.
"""
pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
return (len(pisq) == 0) or any(pisq)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
@staticmethod
def _strip_ones(intup):
return tuple(item for item in intup if item != 1)
def __setattr__(self, attr, value):
if isinstance(self, CompoundModel):
param_names = self._param_names
param_names = self.param_names
if param_names is not None and attr in self.param_names:
param = self.__dict__[attr]
value = _tofloat(value)
if param._validator is not None:
param._validator(self, value)
# check consistency with previous shape and size
eshape = self._param_metrics[attr]["shape"]
if eshape == ():
eshape = (1,)
vshape = np.array(value).shape
if vshape == ():
vshape = (1,)
esize = self._param_metrics[attr]["size"]
if np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones(
eshape
):
raise InputParameterError(
f"Value for parameter {attr} does not match shape or size\nexpected"
f" by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})"
)
if param.unit is None:
if isinstance(value, Quantity):
param._unit = value.unit
param.value = value.value
else:
param.value = value
else:
if not isinstance(value, Quantity):
raise UnitsError(
f"The '{param.name}' parameter should be given as a"
" Quantity because it was originally "
"initialized as a Quantity"
)
param._unit = value.unit
param.value = value.value
else:
if attr in ["fittable", "linear"]:
self.__dict__[attr] = value
else:
super().__setattr__(attr, value)
def _pre_evaluate(self, *args, **kwargs):
"""
Model specific input setup that needs to occur prior to model evaluation.
"""
# Broadcast inputs into common size
inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs)
# Setup actual model evaluation method
parameters = self._param_sets(raw=True, units=True)
def evaluate(_inputs):
return self.evaluate(*chain(_inputs, parameters))
return evaluate, inputs, broadcasted_shapes, kwargs
def get_bounding_box(self, with_bbox=True):
"""
Return the ``bounding_box`` of a model if it exists or ``None``
otherwise.
Parameters
----------
with_bbox :
The value of the ``with_bounding_box`` keyword argument
when calling the model. Default is `True` for usage when
looking up the model's ``bounding_box`` without risk of error.
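Examples
--------
A minimal sketch:

>>> from astropy.modeling.models import Gaussian1D
>>> g = Gaussian1D()
>>> g.bounding_box = (-1, 1)
>>> g.get_bounding_box(with_bbox=False) is None
True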
"""
bbox = None
if not isinstance(with_bbox, bool) or with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
pass
if isinstance(bbox, CompoundBoundingBox) and not isinstance(
with_bbox, bool
):
bbox = bbox[with_bbox]
return bbox
@property
def _argnames(self):
"""The inputs used to determine input_shape for bounding_box evaluation."""
return self.inputs
def _validate_input_shape(
self, _input, idx, argnames, model_set_axis, check_model_set_axis
):
"""Perform basic validation of a single model input's shape.
Checks that the shape has at least the minimum number of dimensions
required by the given model_set_axis.
Returns the shape of the input if validation succeeds.
"""
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
f"For model_set_axis={model_set_axis}, all inputs must be at "
f"least {model_set_axis + 1}-dimensional."
)
if input_shape[model_set_axis] != self._n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
f"Input argument '{argname}' does not have the correct dimensions"
f" in model_set_axis={model_set_axis} for a model set with"
f" n_models={self._n_models}."
)
return input_shape
def _validate_input_shapes(self, inputs, argnames, model_set_axis):
"""
Perform basic validation of model inputs
--that they are mutually broadcastable and that they have
the minimum dimensions for the given model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = self._n_models > 1 and model_set_axis is not False
all_shapes = []
for idx, _input in enumerate(inputs):
all_shapes.append(
self._validate_input_shape(
_input, idx, argnames, model_set_axis, check_model_set_axis
)
)
input_shape = check_broadcast(*all_shapes)
if input_shape is None:
raise ValueError(
"All inputs must have identical shapes or must be scalars."
)
return input_shape
def input_shape(self, inputs):
"""Get input shape for bounding_box evaluation."""
return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis)
def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox):
"""Generic model evaluation routine.
Selects and evaluates model with or without bounding_box enforcement.
"""
# Evaluate the model using the prepared evaluation method either
# enforcing the bounding_box or not.
bbox = self.get_bounding_box(with_bbox)
if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None:
outputs = bbox.evaluate(evaluate, _inputs, fill_value)
else:
outputs = evaluate(_inputs)
return outputs
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
Model specific post evaluation processing of outputs.
"""
if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
return outputs
@property
def bbox_with_units(self):
return not isinstance(self, CompoundModel)
def __call__(self, *args, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
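For example (a minimal sketch)::

    from astropy.modeling.models import Gaussian1D

    g = Gaussian1D(amplitude=1, mean=0, stddev=1)
    g(0)                      # evaluate at a single point
    g([-1, 0, 1])             # or on an array of inputs
    g(10, with_bounding_box=True, fill_value=0.0)   # respect the bounding box, if any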
"""
# Turn any keyword arguments into positional arguments.
args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs)
# Read model evaluation related parameters
with_bbox = kwargs.pop("with_bounding_box", False)
fill_value = kwargs.pop("fill_value", np.nan)
# prepare for model evaluation (overridden in CompoundModel)
evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate(
*args, **kwargs
)
outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox)
# post-process evaluation results (overridden in CompoundModel)
return self._post_evaluate(
inputs, outputs, broadcasted_shapes, with_bbox, **kwargs
)
def _get_renamed_inputs_as_positional(self, *args, **kwargs):
def _keyword2positional(kwargs):
# Inputs were passed as keyword (not positional) arguments.
# Because the signature of the ``__call__`` is defined at
# the class level, the name of the inputs cannot be changed at
# the instance level and the old names are always present in the
# signature of the method. In order to use the new names of the
# inputs, the old names are taken out of ``kwargs``, the input
# values are sorted in the order of self.inputs and passed as
# positional arguments to ``__call__``.
# These are the keys that are always present as keyword arguments.
keys = [
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
]
new_inputs = {}
# kwargs contain the names of the new inputs + ``keys``
allkeys = list(kwargs.keys())
# Remove the names of the new inputs from kwargs and save them
# to a dict ``new_inputs``.
for key in allkeys:
if key not in keys:
new_inputs[key] = kwargs[key]
del kwargs[key]
return new_inputs, kwargs
n_args = len(args)
new_inputs, kwargs = _keyword2positional(kwargs)
n_all_args = n_args + len(new_inputs)
if n_all_args < self.n_inputs:
raise ValueError(
f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}"
)
elif n_all_args > self.n_inputs:
raise ValueError(
f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}"
)
if n_args == 0:
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
new_args.append(new_inputs[k])
elif n_args != self.n_inputs:
# Some inputs are passed as positional, others as keyword arguments.
args = list(args)
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
if k in new_inputs:
new_args.append(new_inputs[k])
else:
new_args.append(args[0])
del args[0]
else:
new_args = args
return new_args, kwargs
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def model_set_axis(self):
"""
The index of the model set axis--that is, the axis of a parameter array
along which the values for the different models in the set are laid out--as
specified when the model was initialized.
See the documentation on :ref:`astropy:modeling-model-sets`
for more details.
"""
return self._model_set_axis
@property
def param_sets(self):
"""
Return parameters as a pset.
This is a list with one item per parameter set, which is an array of
that parameter's values across all parameter sets, with the last axis
associated with the parameter set.
"""
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
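For example (a minimal sketch)::

    from astropy.modeling import models

    g = models.Gaussian1D(amplitude=10, mean=5, stddev=0.3)
    g.parameters                  # values [10., 5., 0.3], in param_names order
    g.parameters = [20, 6, 0.4]   # updates amplitude, mean and stddev in place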
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
self._parameters_to_array()
start = self._param_metrics[self.param_names[0]]["slice"].start
stop = self._param_metrics[self.param_names[-1]]["slice"].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]["slice"].start
stop = self._param_metrics[self.param_names[-1]]["slice"].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
f"parameters array: {e!r}"
)
self._array_to_parameters()
@property
def sync_constraints(self):
"""
This is a boolean property that indicates whether or not accessing constraints
automatically checks the constituent models' current values. It defaults to True
on creation of a model, but for fitting purposes it should be set to False
for performance reasons.
"""
if not hasattr(self, "_sync_constraints"):
self._sync_constraints = True
return self._sync_constraints
@sync_constraints.setter
def sync_constraints(self, value):
if not isinstance(value, bool):
raise ValueError("sync_constraints only accepts True or False as values")
self._sync_constraints = value
@property
def fixed(self):
"""
A ``dict`` mapping parameter names to their fixed constraint.
"""
if not hasattr(self, "_fixed") or self.sync_constraints:
self._fixed = _ConstraintsDict(self, "fixed")
return self._fixed
@property
def bounds(self):
"""
A ``dict`` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples or ``[min, max]`` lists.
"""
if not hasattr(self, "_bounds") or self.sync_constraints:
self._bounds = _ConstraintsDict(self, "bounds")
return self._bounds
@property
def tied(self):
"""
A ``dict`` mapping parameter names to their tied constraint.
"""
if not hasattr(self, "_tied") or self.sync_constraints:
self._tied = _ConstraintsDict(self, "tied")
return self._tied
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._mconstraints["eqcons"]
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._mconstraints["ineqcons"]
def has_inverse(self):
"""
Returns True if the model has an analytic or user
inverse defined.
"""
try:
self.inverse
except NotImplementedError:
return False
return True
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
set with a manually-defined inverse, such as a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
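For example, assigning and removing a user inverse (a minimal sketch using
`~astropy.modeling.models.Shift`)::

    from astropy.modeling.models import Shift

    m = Shift(2) | Shift(3)
    m.inverse = Shift(-5)    # assign a user-defined inverse
    m.has_user_inverse       # True
    del m.inverse            # back to the default inverse, Shift(-3) | Shift(-2)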
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
result = self._inverse()
if result is not NotImplemented:
if not self._has_inverse_bounding_box:
result.bounding_box = None
return result
raise NotImplementedError(
"No analytical or user-supplied inverse transform "
"has been implemented for this model."
)
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
"model to have no inverse)."
)
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
try:
del self._user_inverse
except AttributeError:
pass
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
@property
def bounding_box(self):
r"""
A `tuple` of length `n_inputs` defining the bounding box limits, or
raise `NotImplementedError` for no bounding_box.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
manually to an array-like object of shape ``(model.n_inputs, 2)``. For
further usage, see :ref:`astropy:bounding-boxes`
The limits are ordered according to the `numpy` ``'C'`` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: No bounding box is defined for this model
(note: the bounding box was explicitly disabled for this model;
use `del model.bounding_box` to restore the default bounding box,
if one is defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model)."
)
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError("No bounding box is defined for this model.")
elif isinstance(self._bounding_box, ModelBoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return ModelBoundingBox.validate(self, self._bounding_box())
else:
# The only other allowed possibility is that it's a ModelBoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), model=self)()
return self._bounding_box(bounding_box, model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
elif isinstance(bounding_box, (CompoundBoundingBox, dict)):
cls = CompoundBoundingBox
elif isinstance(self._bounding_box, type) and issubclass(
self._bounding_box, ModelBoundingBox
):
cls = self._bounding_box
else:
cls = ModelBoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
def set_slice_args(self, *args):
if isinstance(self._user_bounding_box, CompoundBoundingBox):
self._user_bounding_box.slice_args = args
else:
raise RuntimeError("The bounding_box for this model is not compound")
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def cov_matrix(self):
"""
Fitter should set covariance matrix, if available.
"""
return self._cov_matrix
@cov_matrix.setter
def cov_matrix(self, cov):
self._cov_matrix = cov
unfix_untied_params = [
p
for p in self.param_names
if (self.fixed[p] is False) and (self.tied[p] is False)
]
if type(cov) == list: # model set
param_stds = []
for c in cov:
param_stds.append(
[np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)]
)
for p, param_name in enumerate(unfix_untied_params):
par = getattr(self, param_name)
par.std = [item[p] for item in param_stds]
setattr(self, param_name, par)
else:
param_stds = [
np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix)
]
for param_name in unfix_untied_params:
par = getattr(self, param_name)
par.std = param_stds.pop(0)
setattr(self, param_name, par)
@property
def stds(self):
"""
Standard deviation of parameters, if covariance matrix is available.
"""
return self._stds
@stds.setter
def stds(self, stds):
self._stds = stds
@property
def separable(self):
"""A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
raise NotImplementedError(
'The "separable" property is not defined for '
f"model {self.__class__.__name__}"
)
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
Return an instance of the model for which the parameter values have
been converted to the right units for the data, then the units have
been stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not
necessarily the units of the input data, but are derived from them.
Model subclasses that want fitting to work in the presence of
quantities need to define a ``_parameter_units_for_data_units`` method
that takes the input and output units (as two dictionaries) and
returns a dictionary giving the target units for each parameter.
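A minimal sketch of such a method, for a hypothetical model with one input
``x``, one output ``y``, and parameters ``amplitude`` and ``mean``::

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        # amplitude carries the output (dependent-variable) units,
        # mean carries the input (independent-variable) units
        return {"amplitude": outputs_unit["y"], "mean": inputs_unit["x"]}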
"""
model = self.copy()
inputs_unit = {
inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
for inp in self.inputs
if kwargs[inp] is not None
}
outputs_unit = {
out: getattr(kwargs[out], "unit", dimensionless_unscaled)
for out in self.outputs
if kwargs[out] is not None
}
parameter_units = self._parameter_units_for_data_units(
inputs_unit, outputs_unit
)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
if isinstance(model, CompoundModel):
model.strip_units_from_tree()
return model
def output_units(self, **kwargs):
"""
Return a dictionary of output units for this model given a dictionary
of fitting inputs and outputs.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
This method will force extra model evaluations, which may be computationally
expensive. To avoid this, one can add a return_units property to the model,
see :ref:`astropy:models_return_units`.
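A minimal sketch of such a property, for a hypothetical model with a single
output ``y`` that always returns values in Jy::

    import astropy.units as u

    @property
    def return_units(self):
        # map each output name to the unit in which it is returned
        return {"y": u.Jy}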
"""
units = self.return_units
if units is None or units == {}:
inputs = {inp: kwargs[inp] for inp in self.inputs}
values = self(**inputs)
if self.n_outputs == 1:
values = (values,)
units = {
out: getattr(values[index], "unit", dimensionless_unscaled)
for index, out in enumerate(self.outputs)
}
return units
def strip_units_from_tree(self):
for item in self._leaflist:
for parname in item.param_names:
par = getattr(item, parname)
par._set_unit(None, force=True)
def with_units_from_data(self, **kwargs):
"""
Return an instance of the model which has units for which the parameter
values are compatible with the data units specified.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units
of the input data, but are derived from them. Model subclasses that
want fitting to work in the presence of quantities need to define a
``_parameter_units_for_data_units`` method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
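
        Examples
        --------
        A minimal sketch, assuming a unitless
        `~astropy.modeling.functional_models.Gaussian1D` and data expressed as
        quantities:

        >>> from astropy import units as u
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=3, mean=2, stddev=0.5)
        >>> g_q = g.with_units_from_data(x=[1, 2, 3] * u.um, y=[1, 2, 3] * u.Jy)
        >>> g_q.mean.unit
        Unit("um")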
"""
model = self.copy()
inputs_unit = {
inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
for inp in self.inputs
if kwargs[inp] is not None
}
outputs_unit = {
out: getattr(kwargs[out], "unit", dimensionless_unscaled)
for out in self.outputs
if kwargs[out] is not None
}
parameter_units = self._parameter_units_for_data_units(
inputs_unit, outputs_unit
)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly,
# hence the call to ``_set_unit``.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
@property
def _has_units(self):
# Returns True if any of the parameters have units
return any(getattr(self, param).unit is not None for param in self.param_names)
@property
def _supports_unit_fitting(self):
# If the model has a ``_parameter_units_for_data_units`` method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, "_parameter_units_for_data_units")
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
        (i.e. a = y - bx).
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
            `Model.bounding_box` is `None`, ``out`` or ``coords`` must be
passed.
Raises
------
ValueError
            If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
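
        A minimal sketch (assuming a 2-D Gaussian with an explicitly set
        bounding box; the array and box sizes are arbitrary):

        >>> import numpy as np
        >>> from astropy.modeling.models import Gaussian2D
        >>> g = Gaussian2D(amplitude=1, x_mean=25, y_mean=25, x_stddev=3, y_stddev=3)
        >>> g.bounding_box = ((20, 30), (20, 30))  # ((y_low, y_high), (x_low, x_high))
        >>> image = g.render(out=np.zeros((50, 50)))  # evaluated only inside the box
        >>> image.shape
        (50, 50)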
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or out must be input.")
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError("inconsistent shape of the output.")
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError(
"the array and model must have the same number of dimensions."
)
if bbox is not None:
# Assures position is at center pixel,
# important when using add_array.
pd = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array(
[extract_array(c, sub_shape, pos) for c in coords]
)
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input out in "
"one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
not be overridden since it will return the input units based on the
annotations.
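
        Examples
        --------
        A minimal sketch of the annotation approach (``_ScaleMeters`` is a
        hypothetical model defined here purely for illustration):

        >>> from astropy import units as u
        >>> from astropy.modeling import Fittable1DModel, Parameter
        >>> class _ScaleMeters(Fittable1DModel):
        ...     # Hypothetical example model: scales an input given in metres.
        ...     factor = Parameter(default=1)
        ...     @staticmethod
        ...     def evaluate(x: u.m, factor):
        ...         return factor * x
        >>> _ScaleMeters().input_units['x']
        Unit("m")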
"""
if hasattr(self, "_input_units"):
return self._input_units
elif hasattr(self.evaluate, "__annotations__"):
annotations = self.evaluate.__annotations__.copy()
annotations.pop("return", None)
if annotations:
                # If annotations are missing for any of the inputs this will
                # raise a KeyError.
return {name: annotations[name] for name in self.inputs}
else:
# None means any unit is accepted
return None
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the
output of evaluate should be in, and returns a dictionary mapping
outputs to units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, "_return_units"):
return self._return_units
elif hasattr(self.evaluate, "__annotations__"):
return self.evaluate.__annotations__.get("return", None)
else:
# None means any unit is accepted
return None
def _prepare_inputs_single_model(self, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
            # Ensure that array scalars are always upgraded to 1-D arrays for the
# sake of consistency with how parameters work. They will be cast back
# to scalars at the end
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if self.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
                        raise ValueError(
                            f"Model input argument {self.inputs[idx]!r} of shape"
                            f" {input_shape!r} cannot be broadcast with parameter"
                            f" {param.name!r} of shape {param.shape!r}."
                        )
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if self.n_outputs > self.n_inputs:
extra_outputs = self.n_outputs - self.n_inputs
if not broadcasts:
                # If there were no inputs then the broadcasts list is empty;
                # just add a None since there is no broadcasting of outputs and
                # inputs necessary (see _prepare_outputs_single_model)
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
@staticmethod
def _remove_axes_from_shape(shape, axis):
"""
Given a shape tuple as the first input, construct a new one by removing
that particular axis from the shape and all preceding axes. Negative axis
        numbers are permitted, where the axis is relative to the last axis.
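
        For example (an illustration of the intended behaviour; a positive axis
        drops that axis together with all preceding axes, a negative axis drops
        only the selected axis)::

            Model._remove_axes_from_shape((2, 3, 4), 1)   # -> (4,)
            Model._remove_axes_from_shape((2, 3, 4), -1)  # -> (2, 3)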
"""
if len(shape) == 0:
return shape
if axis < 0:
axis = len(shape) + axis
return shape[:axis] + shape[axis + 1 :]
if axis >= len(shape):
axis = len(shape) - 1
shape = shape[axis + 1 :]
return shape
def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input, **kwargs):
reshaped = []
pivots = []
model_set_axis_param = self.model_set_axis # needed to reshape param
for idx, _input in enumerate(inputs):
max_param_shape = ()
if self._n_models > 1 and model_set_axis_input is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (
_input.shape[:model_set_axis_input]
+ _input.shape[model_set_axis_input + 1 :]
)
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(
input_shape,
self._remove_axes_from_shape(param.shape, model_set_axis_param),
)
except IncompatibleShapeError:
raise ValueError(
f"Model input argument {self.inputs[idx]!r} of shape"
f" {input_shape!r} "
f"cannot be broadcast with parameter {param.name!r} of shape "
f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}."
)
if len(param.shape) - 1 > len(max_param_shape):
max_param_shape = self._remove_axes_from_shape(
param.shape, model_set_axis_param
)
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis_input is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model_set_axis_param
else:
pivot = input_ndim - len(max_param_shape)
new_shape = _input.shape[:pivot] + (1,) + _input.shape[pivot:]
new_input = _input.reshape(new_shape)
else:
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = self.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (
_input.shape[: pivot + 1] + new_axes + _input.shape[pivot + 1 :]
)
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis_input, pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if self.n_inputs < self.n_outputs:
pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs))
return reshaped, (pivots,)
def prepare_inputs(
self, *inputs, model_set_axis=None, equivalencies=None, **kwargs
):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
        there is more than one parameter set. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
self._validate_input_shapes(inputs, self.inputs, model_set_axis)
inputs_map = kwargs.get("inputs_map", None)
inputs = self._validate_input_units(inputs, equivalencies, inputs_map)
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if self._n_models == 1:
return self._prepare_inputs_single_model(params, inputs, **kwargs)
else:
return self._prepare_inputs_model_set(
params, inputs, model_set_axis, **kwargs
)
def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None):
inputs = list(inputs)
name = self.name or self.__class__.__name__
# Check that the units are correct, if applicable
if self.input_units is not None:
# If a leaflist is provided that means this is in the context of
# a compound model and it is necessary to create the appropriate
# alias for the input coordinate name for the equivalencies dict
if inputs_map:
edict = {}
for mod, mapping in inputs_map:
if self is mod:
edict[mapping[0]] = equivalencies[mapping[1]]
else:
edict = equivalencies
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
self.inputs, edict, self.input_units_equivalencies
)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(
input_unit, equivalencies=input_units_equivalencies[input_name]
):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is
# because some equivalencies are non-linear, and
# we need to be sure that we evaluate the model in
# its own frame of reference. If input_units_strict
# is set, we also need to convert to the input units.
if (
len(input_units_equivalencies) > 0
or self.input_units_strict[input_name]
):
inputs[i] = inputs[i].to(
input_unit,
equivalencies=input_units_equivalencies[input_name],
)
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
                                raise UnitsError(
                                    f"{name}: Units of input '{self.inputs[i]}', "
                                    f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
                                    " could not be converted to "
                                    "required dimensionless "
                                    "input"
                                )
else:
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
" could not be "
"converted to required input"
f" units of {input_unit} ({input_unit.physical_type})"
)
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (
not self.input_units_allow_dimensionless[input_name]
and input_unit is not dimensionless_unscaled
and input_unit is not None
):
if np.any(inputs[i] != 0):
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}',"
" (dimensionless), could not be converted to required "
f"input units of {input_unit} "
f"({input_unit.physical_type})"
)
return inputs
def _process_output_units(self, inputs, outputs):
inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple(
Quantity(out, return_units.get(out_name, None), subok=True)
for out, out_name in zip(outputs, self.outputs)
)
return outputs
@staticmethod
def _prepare_output_single_model(output, broadcast_shape):
if broadcast_shape is not None:
if not broadcast_shape:
return output.item()
else:
try:
return output.reshape(broadcast_shape)
except ValueError:
try:
return output.item()
except ValueError:
return output
return output
def _prepare_outputs_single_model(self, outputs, broadcasted_shapes):
outputs = list(outputs)
for idx, output in enumerate(outputs):
try:
broadcast_shape = check_broadcast(*broadcasted_shapes[0])
except (IndexError, TypeError):
broadcast_shape = broadcasted_shapes[0][idx]
outputs[idx] = self._prepare_output_single_model(output, broadcast_shape)
return tuple(outputs)
def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis):
pivots = broadcasted_shapes[0]
# If model_set_axis = False was passed then use
# self._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = self.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot, model_set_axis)
return tuple(outputs)
def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs):
model_set_axis = kwargs.get("model_set_axis", None)
if len(self) == 1:
return self._prepare_outputs_single_model(outputs, broadcasted_shapes)
else:
return self._prepare_outputs_model_set(
outputs, broadcasted_shapes, model_set_axis
)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return self.copy()
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
"""
new_model = self.copy()
new_model._name = name
return new_model
def coerce_units(
self,
input_units=None,
return_units=None,
input_units_equivalencies=None,
input_units_allow_dimensionless=False,
):
"""
Attach units to this (unitless) model.
Parameters
----------
input_units : dict or tuple, optional
Input units to attach. If dict, each key is the name of a model input,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.inputs`.
return_units : dict or tuple, optional
Output units to attach. If dict, each key is the name of a model output,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.outputs`.
input_units_equivalencies : dict, optional
Default equivalencies to apply to input values. If set, this should be a
dictionary where each key is a string that corresponds to one of the
model inputs.
input_units_allow_dimensionless : bool or dict, optional
Allow dimensionless input. If this is True, input values to evaluate will
gain the units specified in input_units. If this is a dictionary then it
should map input name to a bool to allow dimensionless numbers for that
input.
Returns
-------
`CompoundModel`
A `CompoundModel` composed of the current model plus
`~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units.
Raises
------
ValueError
If the current model already has units.
Examples
--------
Wrapping a unitless model to require and convert units:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP
<Quantity 1.2 s>
Wrapping a unitless model but still permitting unitless input:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True)
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(10) # doctest: +FLOAT_CMP
<Quantity 21. s>
"""
from .mappings import UnitsMapping
result = self
if input_units is not None:
if self.input_units is not None:
model_units = self.input_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.inputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError(
"Cannot specify input_units for model with existing input units"
)
if isinstance(input_units, dict):
if input_units.keys() != set(self.inputs):
message = (
f"""input_units keys ({", ".join(input_units.keys())}) """
f"""do not match model inputs ({", ".join(self.inputs)})"""
)
raise ValueError(message)
input_units = [input_units[i] for i in self.inputs]
if len(input_units) != self.n_inputs:
message = (
"input_units length does not match n_inputs: "
f"expected {self.n_inputs}, received {len(input_units)}"
)
raise ValueError(message)
mapping = tuple(
(unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units)
)
input_mapping = UnitsMapping(
mapping,
input_units_equivalencies=input_units_equivalencies,
input_units_allow_dimensionless=input_units_allow_dimensionless,
)
input_mapping.inputs = self.inputs
input_mapping.outputs = self.inputs
result = input_mapping | result
if return_units is not None:
if self.return_units is not None:
model_units = self.return_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.outputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError(
"Cannot specify return_units for model "
"with existing output units"
)
if isinstance(return_units, dict):
if return_units.keys() != set(self.outputs):
message = (
f"""return_units keys ({", ".join(return_units.keys())}) """
f"""do not match model outputs ({", ".join(self.outputs)})"""
)
raise ValueError(message)
return_units = [return_units[i] for i in self.outputs]
if len(return_units) != self.n_outputs:
message = (
"return_units length does not match n_outputs: "
f"expected {self.n_outputs}, received {len(return_units)}"
)
raise ValueError(message)
mapping = tuple(
(model_units.get(i), unit)
for i, unit in zip(self.outputs, return_units)
)
return_mapping = UnitsMapping(mapping)
return_mapping.inputs = self.outputs
return_mapping.outputs = self.outputs
result = result | return_mapping
return result
@property
def n_submodels(self):
"""
Return the number of components in a single model, which is
obviously 1.
"""
return 1
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
for ckey, cvalue in values.items():
param = getattr(self, ckey)
setattr(param, constraint, cvalue)
self._mconstraints = {}
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._mconstraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
n_models = kwargs.pop("n_models", None)
if not (
n_models is None
or (isinstance(n_models, (int, np.integer)) and n_models >= 1)
):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
f"(got {n_models!r})"
)
model_set_axis = kwargs.pop("model_set_axis", None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (
model_set_axis is False
or np.issubdtype(type(model_set_axis), np.integer)
):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
f"model in a set of models (got {model_set_axis!r})."
)
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = set()
if len(args) > len(self.param_names):
raise TypeError(
f"{self.__class__.__name__}.__init__() takes at most "
f"{len(self.param_names)} positional arguments ({len(args)} given)"
)
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
# A value of None implies using the default value, if exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
param_name = self.param_names[idx]
params.add(param_name)
if not isinstance(arg, Parameter):
value = quantity_asanyarray(arg, dtype=float)
else:
value = arg
self._initialize_parameter_value(param_name, value)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
f"{self.__class__.__name__}.__init__() got multiple values for"
f" parameter {param_name!r}"
)
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray
# because if any of the arguments are quantities, we need
# to return a Quantity object not a plain Numpy array.
value = quantity_asanyarray(value, dtype=float)
params.add(param_name)
self._initialize_parameter_value(param_name, value)
# Now deal with case where param_name is not supplied by args or kwargs
for param_name in self.param_names:
if param_name not in params:
self._initialize_parameter_value(param_name, None)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
f"{self.__class__.__name__}.__init__() got an unrecognized"
f" parameter {kwarg!r}"
)
# Determine the number of model sets: If the model_set_axis is
# None then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name in self.param_names:
value = getattr(self, name)
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension at least"
f" {min_ndim} for model_set_axis={model_set_axis} (the value"
f" given for {name!r} is only {param_ndim}-dimensional)"
)
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
f"Inconsistent dimensions for parameter {name!r} for"
f" {n_models} model sets. The length of axis"
f" {model_set_axis} must be the same for all input parameter"
" values"
)
self._check_param_broadcast(max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(None)
self._n_models = n_models
# now validate parameters
for name in params:
param = getattr(self, name)
if param._validator is not None:
param._validator(self, param.value)
def _initialize_parameter_value(self, param_name, value):
"""Mostly deals with consistency checks and determining unit issues."""
if isinstance(value, Parameter):
self.__dict__[param_name] = value
return
param = getattr(self, param_name)
# Use default if value is not provided
if value is None:
default = param.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(
f"{self.__class__.__name__}.__init__() requires a value for "
f"parameter {param_name!r}"
)
value = default
unit = param.unit
else:
if isinstance(value, Quantity):
unit = value.unit
value = value.value
else:
unit = None
if unit is None and param.unit is not None:
raise InputParameterError(
f"{self.__class__.__name__}.__init__() requires a Quantity for"
f" parameter {param_name!r}"
)
param._unit = unit
param._set_unit(unit, force=True)
param.internal_unit = None
if param._setter is not None:
if unit is not None:
_val = param._setter(value * unit)
else:
_val = param._setter(value)
if isinstance(_val, Quantity):
param.internal_unit = _val.unit
param._internal_value = np.array(_val.value)
else:
param.internal_unit = None
param._internal_value = np.array(_val)
else:
param._value = np.array(value)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
def _parameters_to_array(self):
# Now set the parameter values (this will also fill
# self._parameters)
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = param.value
if not isinstance(value, np.ndarray):
value = np.array([value])
self._parameters[param_metrics[name]["slice"]] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
def _array_to_parameters(self):
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = self._parameters[param_metrics[name]["slice"]]
value.shape = param_metrics[name]["shape"]
param.value = value
def _check_param_broadcast(self, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
If model_set_axis is None this merely checks that the parameters
broadcast and returns an empty dict if so. This mode is only used for
single model sets.
"""
all_shapes = []
model_set_axis = self._model_set_axis
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
# array with shape (2,) is extended to (2, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (
param_shape[: model_set_axis + 1]
+ new_axes
+ param_shape[model_set_axis + 1 :]
)
self._param_metrics[name]["broadcast_shape"] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = self.param_names[shape_a_idx]
param_b = self.param_names[shape_b_idx]
raise InputParameterError(
f"Parameter {param_a!r} of shape {shape_a!r} cannot be broadcast with "
f"parameter {param_b!r} of shape {shape_b!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules."
)
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
whether or not to return the raw parameter values (i.e. the values that
are actually stored in the ._parameters array, as opposed to the values
        displayed to users). In most cases these are one and the same, but there
are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw and param._setter:
value = param._internal_value
else:
value = param.value
broadcast_shape = self._param_metrics[name].get("broadcast_shape")
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and param.internal_unit is not None:
unit = param.internal_unit
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit, subok=True)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
parts = [repr(a) for a in args]
parts.extend(
f"{name}={param_repr_oneline(getattr(self, name))}"
for name in self.param_names
)
if self.name is not None:
parts.append(f"name={self.name!r}")
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] == value:
continue
parts.append(f"{kwarg}={value!r}")
if len(self) > 1:
parts.append(f"n_models={len(self)}")
return f"<{self.__class__.__name__}({', '.join(parts)})>"
def _format_str(self, keywords=[], defaults={}):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
("Model", self.__class__.__name__),
("Name", self.name),
("Inputs", self.inputs),
("Outputs", self.outputs),
("Model set size", len(self)),
]
parts = [
f"{keyword}: {value}"
for keyword, value in default_keywords
if value is not None
]
for keyword, value in keywords:
if keyword.lower() in defaults and defaults[keyword.lower()] == value:
continue
parts.append(f"{keyword}: {value}")
parts.append("Parameters:")
if len(self) == 1:
columns = [[getattr(self, name).value] for name in self.param_names]
else:
columns = [getattr(self, name).value for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return "\n".join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 1
n_outputs = 1
_separable = True
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 2
n_outputs = 1
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params), f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (
lambda inputs, params: (
f[0](inputs[: f[1]], params) + g[0](inputs[f[1] :], params)
),
f[1] + g[1],
f[2] + g[2],
)
BINARY_OPERATORS = {
"+": _make_arithmetic_operator(operator.add),
"-": _make_arithmetic_operator(operator.sub),
"*": _make_arithmetic_operator(operator.mul),
"/": _make_arithmetic_operator(operator.truediv),
"**": _make_arithmetic_operator(operator.pow),
"|": _composition_operator,
"&": _join_operator,
}
SPECIAL_OPERATORS = _SpecialOperatorsDict()
def _add_special_operator(sop_name, sop):
return SPECIAL_OPERATORS.add(sop_name, sop)
class CompoundModel(Model):
"""
Base class for compound models.
While it can be used directly, the recommended way
to combine models is through the model operators.
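
    A minimal sketch (any two compatible models can be combined through the
    arithmetic and composition operators):

    >>> from astropy.modeling import CompoundModel
    >>> from astropy.modeling.models import Const1D, Gaussian1D
    >>> m = Const1D(3) + Gaussian1D(amplitude=1, mean=0, stddev=1)
    >>> isinstance(m, CompoundModel)
    True
    >>> m.param_names
    ('amplitude_0', 'amplitude_1', 'mean_1', 'stddev_1')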
"""
def __init__(self, op, left, right, name=None):
self.__dict__["_param_names"] = None
self._n_submodels = None
self.op = op
self.left = left
self.right = right
self._bounding_box = None
self._user_bounding_box = None
self._leaflist = None
self._tdict = None
self._parameters = None
self._parameters_ = None
self._param_metrics = None
if op != "fix_inputs" and len(left) != len(right):
raise ValueError("Both operands must have equal values for n_models")
self._n_models = len(left)
if op != "fix_inputs" and (
(left.model_set_axis != right.model_set_axis) or left.model_set_axis
): # not False and not 0
raise ValueError(
"model_set_axis must be False or 0 and consistent for operands"
)
self._model_set_axis = left.model_set_axis
if op in ["+", "-", "*", "/", "**"] or op in SPECIAL_OPERATORS:
if left.n_inputs != right.n_inputs or left.n_outputs != right.n_outputs:
                raise ModelDefinitionError(
                    "Both operands must have matching numbers of inputs and outputs"
                )
self.n_inputs = left.n_inputs
self.n_outputs = left.n_outputs
self.inputs = left.inputs
self.outputs = left.outputs
elif op == "&":
self.n_inputs = left.n_inputs + right.n_inputs
self.n_outputs = left.n_outputs + right.n_outputs
self.inputs = combine_labels(left.inputs, right.inputs)
self.outputs = combine_labels(left.outputs, right.outputs)
elif op == "|":
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
"Unsupported operands for |:"
f" {left.name} (n_inputs={left.n_inputs},"
f" n_outputs={left.n_outputs}) and"
f" {right.name} (n_inputs={right.n_inputs},"
f" n_outputs={right.n_outputs}); n_outputs for the left-hand model"
" must match n_inputs for the right-hand model."
)
self.n_inputs = left.n_inputs
self.n_outputs = right.n_outputs
self.inputs = left.inputs
self.outputs = right.outputs
elif op == "fix_inputs":
if not isinstance(left, Model):
raise ValueError(
'First argument to "fix_inputs" must be an instance of '
"an astropy Model."
)
if not isinstance(right, dict):
raise ValueError(
'Expected a dictionary for second argument of "fix_inputs".'
)
# Dict keys must match either possible indices
# for model on left side, or names for inputs.
self.n_inputs = left.n_inputs - len(right)
# Assign directly to the private attribute (instead of using the setter)
# to avoid asserting the new number of outputs matches the old one.
self._outputs = left.outputs
self.n_outputs = left.n_outputs
newinputs = list(left.inputs)
keys = right.keys()
input_ind = []
for key in keys:
if np.issubdtype(type(key), np.integer):
if key >= left.n_inputs or key < 0:
raise ValueError(
"Substitution key integer value "
"not among possible input choices."
)
if key in input_ind:
raise ValueError(
"Duplicate specification of same input (index/name)."
)
input_ind.append(key)
elif isinstance(key, str):
if key not in left.inputs:
raise ValueError(
"Substitution key string not among possible input choices."
)
# Check to see it doesn't match positional
# specification.
ind = left.inputs.index(key)
if ind in input_ind:
raise ValueError(
"Duplicate specification of same input (index/name)."
)
input_ind.append(ind)
# Remove substituted inputs
input_ind.sort()
input_ind.reverse()
for ind in input_ind:
del newinputs[ind]
self.inputs = tuple(newinputs)
# Now check to see if the input model has bounding_box defined.
# If so, remove the appropriate dimensions and set it for this
# instance.
try:
self.bounding_box = self.left.bounding_box.fix_inputs(self, right)
except NotImplementedError:
pass
else:
            raise ModelDefinitionError(f"Illegal operator: {self.op!r}")
self.name = name
self._fittable = None
self.fit_deriv = None
self.col_fit_deriv = None
if op in ("|", "+", "-"):
self.linear = left.linear and right.linear
else:
self.linear = False
self.eqcons = []
self.ineqcons = []
self.n_left_params = len(self.left.parameters)
self._map_parameters()
def _get_left_inputs_from_args(self, args):
return args[: self.left.n_inputs]
def _get_right_inputs_from_args(self, args):
op = self.op
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs : self.left.n_inputs + self.right.n_inputs]
elif op == "|" or op == "fix_inputs":
return None
else:
return args[: self.left.n_inputs]
def _get_left_params_from_args(self, args):
op = self.op
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
n_inputs = self.left.n_inputs + self.right.n_inputs
return args[n_inputs : n_inputs + self.n_left_params]
else:
return args[self.left.n_inputs : self.left.n_inputs + self.n_left_params]
def _get_right_params_from_args(self, args):
op = self.op
if op == "fix_inputs":
return None
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params :]
else:
return args[self.left.n_inputs + self.n_left_params :]
def _get_kwarg_model_parameters_as_positional(self, args, kwargs):
        # could do it with inserts but rebuilding seems like the simplest way
# TODO: Check if any param names are in kwargs maybe as an intersection of sets?
if self.op == "&":
new_args = list(args[: self.left.n_inputs + self.right.n_inputs])
args_pos = self.left.n_inputs + self.right.n_inputs
else:
new_args = list(args[: self.left.n_inputs])
args_pos = self.left.n_inputs
for param_name in self.param_names:
kw_value = kwargs.pop(param_name, None)
if kw_value is not None:
value = kw_value
else:
try:
value = args[args_pos]
except IndexError:
raise IndexError("Missing parameter or input")
args_pos += 1
new_args.append(value)
return new_args, kwargs
def _apply_operators_to_value_lists(self, leftval, rightval, **kw):
op = self.op
if op == "+":
return binary_operation(operator.add, leftval, rightval)
elif op == "-":
return binary_operation(operator.sub, leftval, rightval)
elif op == "*":
return binary_operation(operator.mul, leftval, rightval)
elif op == "/":
return binary_operation(operator.truediv, leftval, rightval)
elif op == "**":
return binary_operation(operator.pow, leftval, rightval)
elif op == "&":
if not isinstance(leftval, tuple):
leftval = (leftval,)
if not isinstance(rightval, tuple):
rightval = (rightval,)
return leftval + rightval
elif op in SPECIAL_OPERATORS:
return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval)
else:
            raise ModelDefinitionError(f"Unrecognized operator {op}")
def evaluate(self, *args, **kw):
op = self.op
args, kw = self._get_kwarg_model_parameters_as_positional(args, kw)
left_inputs = self._get_left_inputs_from_args(args)
left_params = self._get_left_params_from_args(args)
if op == "fix_inputs":
pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs)))
fixed_inputs = {
key if np.issubdtype(type(key), np.integer) else pos_index[key]: value
for key, value in self.right.items()
}
left_inputs = [
fixed_inputs[ind] if ind in fixed_inputs.keys() else inp
for ind, inp in enumerate(left_inputs)
]
leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params))
if op == "fix_inputs":
return leftval
right_inputs = self._get_right_inputs_from_args(args)
right_params = self._get_right_params_from_args(args)
if op == "|":
if isinstance(leftval, tuple):
return self.right.evaluate(*itertools.chain(leftval, right_params))
else:
return self.right.evaluate(leftval, *right_params)
else:
rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params))
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
@property
def n_submodels(self):
if self._leaflist is None:
self._make_leaflist()
return len(self._leaflist)
@property
def submodel_names(self):
"""Return the names of submodels in a ``CompoundModel``."""
if self._leaflist is None:
self._make_leaflist()
names = [item.name for item in self._leaflist]
nonecount = 0
newnames = []
for item in names:
if item is None:
newnames.append(f"None_{nonecount}")
nonecount += 1
else:
newnames.append(item)
return tuple(newnames)
def both_inverses_exist(self):
"""
        If both members of this compound model have inverses, return True.
"""
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"CompoundModel.both_inverses_exist is deprecated. Use has_inverse instead.",
AstropyDeprecationWarning,
)
try:
self.left.inverse
self.right.inverse
except NotImplementedError:
return False
return True
def _pre_evaluate(self, *args, **kwargs):
"""
CompoundModel specific input setup that needs to occur prior to
model evaluation.
Note
----
All of the _pre_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
# If equivalencies are provided, necessary to map parameters and pass
# the leaflist as a keyword input for use by model evaluation so that
# the compound model input names can be matched to the model input
# names.
if "equivalencies" in kwargs:
# Restructure to be useful for the individual model lookup
kwargs["inputs_map"] = [
(value[0], (value[1], key)) for key, value in self.inputs_map().items()
]
# Setup actual model evaluation method
def evaluate(_inputs):
return self._evaluate(*_inputs, **kwargs)
return evaluate, args, None, kwargs
@property
def _argnames(self):
"""
No inputs should be used to determine input_shape when handling compound models.
"""
return ()
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
CompoundModel specific post evaluation processing of outputs.
Note
----
All of the _post_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1:
return outputs[0]
return outputs
def _evaluate(self, *args, **kw):
op = self.op
if op != "fix_inputs":
if op != "&":
leftval = self.left(*args, **kw)
if op != "|":
rightval = self.right(*args, **kw)
else:
rightval = None
else:
leftval = self.left(*(args[: self.left.n_inputs]), **kw)
rightval = self.right(*(args[self.left.n_inputs :]), **kw)
if op != "|":
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
elif op == "|":
if isinstance(leftval, tuple):
return self.right(*leftval, **kw)
else:
return self.right(leftval, **kw)
else:
subs = self.right
newargs = list(args)
subinds = []
subvals = []
for key in subs.keys():
if np.issubdtype(type(key), np.integer):
subinds.append(key)
elif isinstance(key, str):
ind = self.left.inputs.index(key)
subinds.append(ind)
subvals.append(subs[key])
# Turn inputs specified in kw into positional indices.
# Names for compound inputs do not propagate to sub models.
kwind = []
kwval = []
for kwkey in list(kw.keys()):
if kwkey in self.inputs:
ind = self.inputs.index(kwkey)
if ind < len(args):
raise ValueError(
"Keyword argument duplicates positional value supplied."
)
kwind.append(ind)
kwval.append(kw[kwkey])
del kw[kwkey]
# Build new argument list
# Append keyword specified args first
if kwind:
kwargs = list(zip(kwind, kwval))
kwargs.sort()
kwindsorted, kwvalsorted = list(zip(*kwargs))
newargs = newargs + list(kwvalsorted)
if subinds:
subargs = list(zip(subinds, subvals))
subargs.sort()
# subindsorted, subvalsorted = list(zip(*subargs))
# The substitutions must be inserted in order
for ind, val in subargs:
newargs.insert(ind, val)
return self.left(*newargs, **kw)
@property
def param_names(self):
"""An ordered list of parameter names."""
return self._param_names
def _make_leaflist(self):
tdict = {}
leaflist = []
make_subtree_dict(self, "", tdict, leaflist)
self._leaflist = leaflist
self._tdict = tdict
def __getattr__(self, name):
"""
If someone accesses an attribute not already defined, map the
parameters, and then see if the requested attribute is one of
the parameters.
"""
# The following test is needed to avoid infinite recursion
# caused by deepcopy. There may be other such cases discovered.
if name == "__setstate__":
raise AttributeError
if name in self._param_names:
return self.__dict__[name]
else:
raise AttributeError(f'Attribute "{name}" not found')
def __getitem__(self, index):
if self._leaflist is None:
self._make_leaflist()
leaflist = self._leaflist
tdict = self._tdict
if isinstance(index, slice):
if index.step:
raise ValueError("Steps in slices not supported for compound models")
if index.start is not None:
if isinstance(index.start, str):
start = self._str_index_to_int(index.start)
else:
start = index.start
else:
start = 0
if index.stop is not None:
if isinstance(index.stop, str):
stop = self._str_index_to_int(index.stop)
else:
stop = index.stop - 1
else:
stop = len(leaflist) - 1
if index.stop == 0:
raise ValueError("Slice endpoint cannot be 0")
if start < 0:
start = len(leaflist) + start
if stop < 0:
stop = len(leaflist) + stop
# now search for matching node:
if stop == start: # only single value, get leaf instead in code below
index = start
else:
for key in tdict:
node, leftind, rightind = tdict[key]
if leftind == start and rightind == stop:
return node
raise IndexError("No appropriate subtree matches slice")
if np.issubdtype(type(index), np.integer):
return leaflist[index]
elif isinstance(index, str):
return leaflist[self._str_index_to_int(index)]
else:
raise TypeError("index must be integer, slice, or model name string")
def _str_index_to_int(self, str_index):
# Search through leaflist for item with that name
found = []
for nleaf, leaf in enumerate(self._leaflist):
if getattr(leaf, "name", None) == str_index:
found.append(nleaf)
if len(found) == 0:
raise IndexError(f"No component with name '{str_index}' found")
if len(found) > 1:
raise IndexError(
f"Multiple components found using '{str_index}' as name\n"
f"at indices {found}"
)
return found[0]
@property
def n_inputs(self):
"""The number of inputs of a model."""
return self._n_inputs
@n_inputs.setter
def n_inputs(self, value):
self._n_inputs = value
@property
def n_outputs(self):
"""The number of outputs of a model."""
return self._n_outputs
@n_outputs.setter
def n_outputs(self, value):
self._n_outputs = value
@property
def eqcons(self):
return self._eqcons
@eqcons.setter
def eqcons(self, value):
self._eqcons = value
@property
def ineqcons(self):
        return self._ineqcons
@ineqcons.setter
def ineqcons(self, value):
        self._ineqcons = value
def traverse_postorder(self, include_operator=False):
"""Postorder traversal of the CompoundModel tree."""
res = []
if isinstance(self.left, CompoundModel):
res = res + self.left.traverse_postorder(include_operator)
else:
res = res + [self.left]
if isinstance(self.right, CompoundModel):
res = res + self.right.traverse_postorder(include_operator)
else:
res = res + [self.right]
if include_operator:
res.append(self.op)
else:
res.append(self)
return res
def _format_expression(self, format_leaf=None):
leaf_idx = 0
operands = deque()
if format_leaf is None:
format_leaf = lambda i, l: f"[{i}]"
for node in self.traverse_postorder():
if not isinstance(node, CompoundModel):
operands.append(format_leaf(leaf_idx, node))
leaf_idx += 1
continue
right = operands.pop()
left = operands.pop()
if node.op in OPERATOR_PRECEDENCE:
oper_order = OPERATOR_PRECEDENCE[node.op]
if isinstance(node, CompoundModel):
if (
isinstance(node.left, CompoundModel)
and OPERATOR_PRECEDENCE[node.left.op] < oper_order
):
left = f"({left})"
if (
isinstance(node.right, CompoundModel)
and OPERATOR_PRECEDENCE[node.right.op] < oper_order
):
right = f"({right})"
operands.append(" ".join((left, node.op, right)))
else:
left = f"(({left}),"
right = f"({right}))"
operands.append(" ".join((node.op[0], left, right)))
return "".join(operands)
def _format_components(self):
if self._parameters_ is None:
self._map_parameters()
return "\n\n".join(f"[{idx}]: {m!r}" for idx, m in enumerate(self._leaflist))
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
("Expression", expression),
("Components", "\n" + indent(components)),
]
return super()._format_str(keywords=keywords)
def rename(self, name):
self.name = name
return self
@property
def isleaf(self):
return False
@property
def inverse(self):
if self.op == "|":
return self.right.inverse | self.left.inverse
elif self.op == "&":
return self.left.inverse & self.right.inverse
else:
return NotImplemented
@property
def fittable(self):
"""Set the fittable attribute on a compound model."""
if self._fittable is None:
if self._leaflist is None:
self._map_parameters()
self._fittable = all(m.fittable for m in self._leaflist)
return self._fittable
__add__ = _model_oper("+")
__sub__ = _model_oper("-")
__mul__ = _model_oper("*")
__truediv__ = _model_oper("/")
__pow__ = _model_oper("**")
__or__ = _model_oper("|")
__and__ = _model_oper("&")
def _map_parameters(self):
"""
Map all the constituent model parameters to the compound object,
renaming as necessary by appending a suffix number.
This can be an expensive operation, particularly for a complex
expression tree.
All the corresponding parameter attributes are created that one
expects for the Model class.
The parameter objects that the attributes point to are the same
        objects as in the constituent models. Changes made to parameter
        values in either are seen by both.
Prior to calling this, none of the associated attributes will
exist. This method must be called to make the model usable by
fitting engines.
If oldnames=True, then parameters are named as in the original
implementation of compound models.
"""
if self._parameters is not None:
# do nothing
return
if self._leaflist is None:
self._make_leaflist()
self._parameters_ = {}
param_map = {}
self._param_names = []
for lindex, leaf in enumerate(self._leaflist):
if not isinstance(leaf, dict):
for param_name in leaf.param_names:
param = getattr(leaf, param_name)
new_param_name = f"{param_name}_{lindex}"
self.__dict__[new_param_name] = param
self._parameters_[new_param_name] = param
self._param_names.append(new_param_name)
param_map[new_param_name] = (lindex, param_name)
self._param_metrics = {}
self._param_map = param_map
self._param_map_inverse = {v: k for k, v in param_map.items()}
self._initialize_slices()
self._param_names = tuple(self._param_names)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name] = {}
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
@staticmethod
def _recursive_lookup(branch, adict, key):
if isinstance(branch, CompoundModel):
return adict[key]
return branch, key
def inputs_map(self):
"""
        Map the names of the inputs of this CompoundModel to the inputs of the leaf models.
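
        A minimal sketch (each value is a ``(leaf_model, leaf_input_name)`` pair;
        only the leaf input names are shown here):

        >>> from astropy.modeling.models import Gaussian2D, Shift
        >>> m = Shift(1) & Shift(2) | Gaussian2D(1, 0, 0)
        >>> {name: entry[1] for name, entry in m.inputs_map().items()}
        {'x0': 'x', 'x1': 'x'}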
"""
inputs_map = {}
if not isinstance(
self.op, str
): # If we don't have an operator the mapping is trivial
return {inp: (self, inp) for inp in self.inputs}
elif self.op == "|":
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
elif self.op == "&":
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
if isinstance(self.right, CompoundModel):
r_inputs_map = self.right.inputs_map()
for i, inp in enumerate(self.inputs):
if i < len(self.left.inputs): # Get from left
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[self.left.inputs[i]]
else:
inputs_map[inp] = self.left, self.left.inputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
inputs_map[inp] = r_inputs_map[
self.right.inputs[i - len(self.left.inputs)]
]
else:
inputs_map[inp] = (
self.right,
self.right.inputs[i - len(self.left.inputs)],
)
elif self.op == "fix_inputs":
fixed_ind = list(self.right.keys())
ind = [
list(self.left.inputs).index(i) if isinstance(i, str) else i
for i in fixed_ind
]
inp_ind = list(range(self.left.n_inputs))
for i in ind:
inp_ind.remove(i)
for i in inp_ind:
inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i]
else:
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.left.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
return inputs_map
def _parameter_units_for_data_units(self, input_units, output_units):
if self._leaflist is None:
self._map_parameters()
units_for_data = {}
for imodel, model in enumerate(self._leaflist):
units_for_data_leaf = model._parameter_units_for_data_units(
input_units, output_units
)
for param_leaf in units_for_data_leaf:
param = self._param_map_inverse[(imodel, param_leaf)]
units_for_data[param] = units_for_data_leaf[param_leaf]
return units_for_data
@property
def input_units(self):
inputs_map = self.inputs_map()
input_units_dict = {
key: inputs_map[key][0].input_units[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units is not None
}
if input_units_dict:
return input_units_dict
return None
@property
def input_units_equivalencies(self):
inputs_map = self.inputs_map()
input_units_equivalencies_dict = {
key: inputs_map[key][0].input_units_equivalencies[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units_equivalencies is not None
}
if not input_units_equivalencies_dict:
return None
return input_units_equivalencies_dict
@property
def input_units_allow_dimensionless(self):
inputs_map = self.inputs_map()
return {
key: inputs_map[key][0].input_units_allow_dimensionless[orig_key]
for key, (mod, orig_key) in inputs_map.items()
}
@property
def input_units_strict(self):
inputs_map = self.inputs_map()
return {
key: inputs_map[key][0].input_units_strict[orig_key]
for key, (mod, orig_key) in inputs_map.items()
}
@property
def return_units(self):
outputs_map = self.outputs_map()
return {
key: outputs_map[key][0].return_units[orig_key]
for key, (mod, orig_key) in outputs_map.items()
if outputs_map[key][0].return_units is not None
}
def outputs_map(self):
"""
        Map the names of the outputs to this CompoundModel to the outputs of the leaf models.
"""
outputs_map = {}
if not isinstance(
self.op, str
): # If we don't have an operator the mapping is trivial
return {out: (self, out) for out in self.outputs}
elif self.op == "|":
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for out in self.outputs:
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[out]
else:
outputs_map[out] = self.right, out
elif self.op == "&":
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for i, out in enumerate(self.outputs):
if i < len(self.left.outputs): # Get from left
if isinstance(self.left, CompoundModel):
outputs_map[out] = l_outputs_map[self.left.outputs[i]]
else:
outputs_map[out] = self.left, self.left.outputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[
self.right.outputs[i - len(self.left.outputs)]
]
else:
outputs_map[out] = (
self.right,
self.right.outputs[i - len(self.left.outputs)],
)
elif self.op == "fix_inputs":
return self.left.outputs_map()
else:
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
for out in self.left.outputs:
if isinstance(self.left, CompoundModel):
                    outputs_map[out] = l_outputs_map[out]
else:
outputs_map[out] = self.left, out
return outputs_map
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
            `Model.bounding_box` is `None`, ``out`` or ``coords`` must be
passed.
Raises
------
ValueError
            If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = self.get_bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or out must be input.")
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError("inconsistent shape of the output.")
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError(
"the array and model must have the same number of dimensions."
)
if bbox is not None:
# Assures position is at center pixel, important when using
# add_array.
pd = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array(
[extract_array(c, sub_shape, pos) for c in coords]
)
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input out in "
"one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
def replace_submodel(self, name, model):
"""
Construct a new `~astropy.modeling.CompoundModel` instance from an
existing CompoundModel, replacing the named submodel with a new model.
In order to ensure that inverses and names are kept/reconstructed, it's
necessary to rebuild the CompoundModel from the replaced node all the
way back to the base. The original CompoundModel is left untouched.
Parameters
----------
name : str
name of submodel to be replaced
model : `~astropy.modeling.Model`
replacement model
"""
submodels = [
m for m in self.traverse_postorder() if getattr(m, "name", None) == name
]
if submodels:
if len(submodels) > 1:
raise ValueError(f"More than one submodel named {name}")
old_model = submodels.pop()
if len(old_model) != len(model):
raise ValueError(
"New and old models must have equal values for n_models"
)
# Do this check first in order to raise a more helpful Exception,
# although it would fail trying to construct the new CompoundModel
if (
old_model.n_inputs != model.n_inputs
or old_model.n_outputs != model.n_outputs
):
raise ValueError(
"New model must match numbers of inputs and "
"outputs of existing model"
)
tree = _get_submodel_path(self, name)
while tree:
branch = self.copy()
for node in tree[:-1]:
branch = getattr(branch, node)
setattr(branch, tree[-1], model)
model = CompoundModel(
branch.op, branch.left, branch.right, name=branch.name
)
tree = tree[:-1]
return model
else:
raise ValueError(f"No submodels found named {name}")
def _set_sub_models_and_parameter_units(self, left, right):
"""
Provides a work-around to properly set the sub models and respective
        parameters' units/values when using the ``without_units_for_data``
        or ``with_units_from_data`` methods.
"""
model = CompoundModel(self.op, left, right)
self.left = left
self.right = right
for name in model.param_names:
model_parameter = getattr(model, name)
parameter = getattr(self, name)
parameter.value = model_parameter.value
parameter._set_unit(model_parameter.unit, force=True)
def without_units_for_data(self, **kwargs):
"""
See `~astropy.modeling.Model.without_units_for_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
        units. This is only valid for compound models created with the ``*``
        and ``/`` operators, as in that case it is reasonable to mix the
        output units. It does this
by modifying the output units of each sub model by using the output
units of the other sub model so that we can apply the original function
and get the desired result.
Additional data has to be output in the mixed output unit case
so that the units can be properly rebuilt by
`~astropy.modeling.CompoundModel.with_units_from_data`.
Outside the mixed output units, this method is identical to the
base method.
"""
if self.op in ["*", "/"]:
model = self.copy()
inputs = {inp: kwargs[inp] for inp in self.inputs}
left_units = self.left.output_units(**kwargs)
right_units = self.right.output_units(**kwargs)
if self.op == "*":
left_kwargs = {
out: kwargs[out] / right_units[out]
for out in self.left.outputs
if kwargs[out] is not None
}
right_kwargs = {
out: kwargs[out] / left_units[out]
for out in self.right.outputs
if kwargs[out] is not None
}
else:
left_kwargs = {
out: kwargs[out] * right_units[out]
for out in self.left.outputs
if kwargs[out] is not None
}
right_kwargs = {
out: 1 / kwargs[out] * left_units[out]
for out in self.right.outputs
if kwargs[out] is not None
}
left_kwargs.update(inputs.copy())
right_kwargs.update(inputs.copy())
left = self.left.without_units_for_data(**left_kwargs)
if isinstance(left, tuple):
left_kwargs["_left_kwargs"] = left[1]
left_kwargs["_right_kwargs"] = left[2]
left = left[0]
right = self.right.without_units_for_data(**right_kwargs)
if isinstance(right, tuple):
right_kwargs["_left_kwargs"] = right[1]
right_kwargs["_right_kwargs"] = right[2]
right = right[0]
model._set_sub_models_and_parameter_units(left, right)
return model, left_kwargs, right_kwargs
else:
return super().without_units_for_data(**kwargs)
def with_units_from_data(self, **kwargs):
"""
See `~astropy.modeling.Model.with_units_from_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
        units. This is only valid for compound models created with the ``*``
        and ``/`` operators, as in that case it is reasonable to mix the
        output units. In order to
do this it requires some additional information output by
`~astropy.modeling.CompoundModel.without_units_for_data` passed as
keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``.
Outside the mixed output units, this method is identical to the
base method.
"""
if self.op in ["*", "/"]:
left_kwargs = kwargs.pop("_left_kwargs")
right_kwargs = kwargs.pop("_right_kwargs")
left = self.left.with_units_from_data(**left_kwargs)
right = self.right.with_units_from_data(**right_kwargs)
model = self.copy()
model._set_sub_models_and_parameter_units(left, right)
return model
else:
return super().with_units_from_data(**kwargs)
def _get_submodel_path(model, name):
"""Find the route down a CompoundModel's tree to the model with the
specified name (whether it's a leaf or not).
"""
if getattr(model, "name", None) == name:
return []
try:
return ["left"] + _get_submodel_path(model.left, name)
except (AttributeError, TypeError):
pass
try:
return ["right"] + _get_submodel_path(model.right, name)
except (AttributeError, TypeError):
pass
def binary_operation(binoperator, left, right):
"""
Perform binary operation. Operands may be matching tuples of operands.
"""
if isinstance(left, tuple) and isinstance(right, tuple):
return tuple(binoperator(item[0], item[1]) for item in zip(left, right))
return binoperator(left, right)
def get_ops(tree, opset):
"""
Recursive function to collect operators used.
"""
if isinstance(tree, CompoundModel):
opset.add(tree.op)
get_ops(tree.left, opset)
get_ops(tree.right, opset)
else:
return
def make_subtree_dict(tree, nodepath, tdict, leaflist):
"""Traverse a tree noting each node by a key.
The key indicates all the left/right choices necessary to reach that node.
Each key will reference a tuple that contains:
- reference to the compound model for that node.
- left most index contained within that subtree
(relative to all indices for the whole tree)
- right most index contained within that subtree
"""
# if this is a leaf, just append it to the leaflist
if not hasattr(tree, "isleaf"):
leaflist.append(tree)
else:
leftmostind = len(leaflist)
make_subtree_dict(tree.left, nodepath + "l", tdict, leaflist)
make_subtree_dict(tree.right, nodepath + "r", tdict, leaflist)
rightmostind = len(leaflist) - 1
tdict[nodepath] = (tree, leftmostind, rightmostind)
_ORDER_OF_OPERATORS = [("fix_inputs",), ("|",), ("&",), ("+", "-"), ("*", "/"), ("**",)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None):
"""
This function creates a compound model with one or more of the input
values of the input model assigned fixed values (scalar or array).
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
        This is the model for which one or more of the
        input values will be fixed to some constant value.
values : dict
A dictionary where the key identifies which input to fix
and its value is the value to fix it at. The key may either be the
name of the input or a number reflecting its order in the inputs.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> g = Gaussian2D(1, 2, 3, 4, 5)
>>> gv = fix_inputs(g, {0: 2.5})
Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y)
"""
model = CompoundModel("fix_inputs", modelinstance, values)
if bounding_boxes is not None:
if selector_args is None:
selector_args = tuple((key, True) for key in values.keys())
bbox = CompoundBoundingBox.validate(
modelinstance, bounding_boxes, selector_args
)
_selector = bbox.selector_args.get_fixed_values(modelinstance, values)
new_bbox = bbox[_selector]
new_bbox = new_bbox.__class__.validate(model, new_bbox)
model.bounding_box = new_bbox
return model
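# Illustrative sketch (hypothetical helper, not part of this module's API)
# showing the ``fix_inputs`` pattern from the docstring above; the sample model
# and the fixed value 2.5 are assumptions chosen for demonstration.
def _example_fix_inputs():
    from astropy.modeling.models import Gaussian2D

    g = Gaussian2D(1, 2, 3, 4, 5)
    gv = fix_inputs(g, {0: 2.5})  # freeze the first input (x) at 2.5
    # gv now takes a single input (y); gv(3.0) is equivalent to g(2.5, 3.0)
    return gv(3.0)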
def bind_bounding_box(modelinstance, bounding_box, ignored=None, order="C"):
"""
Set a validated bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated bounding box will be set on.
bounding_box : tuple
A bounding box tuple, see :ref:`astropy:bounding-boxes` for details
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = ModelBoundingBox.validate(
modelinstance, bounding_box, ignored=ignored, order=order
)
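# Illustrative sketch (hypothetical helper, not part of this module's API) of
# ``bind_bounding_box``; the model and the limits are assumptions. With the
# default ``order='C'`` the 2D box is given as ((y_low, y_high), (x_low, x_high)).
def _example_bind_bounding_box():
    from astropy.modeling.models import Polynomial2D

    p = Polynomial2D(degree=1)
    bind_bounding_box(p, ((-1, 1), (-2, 2)))
    return p.bounding_box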
def bind_compound_bounding_box(
modelinstance,
bounding_boxes,
selector_args,
create_selector=None,
ignored=None,
order="C",
):
"""
Add a validated compound bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated compound bounding box will be set on.
bounding_boxes : dict
A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`
for details.
selector_args : list
List of selector argument tuples to define selection for compound
bounding box, see :ref:`astropy:bounding-boxes` for details.
create_selector : callable, optional
An optional callable with interface (selector_value, model) which
can generate a bounding box based on a selector value and model if
there is no bounding box in the compound bounding box listed under
that selector value. Default is ``None``, meaning new bounding
box entries will not be automatically generated.
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = CompoundBoundingBox.validate(
modelinstance,
bounding_boxes,
selector_args,
create_selector=create_selector,
ignored=ignored,
order=order,
)
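# Illustrative sketch (hypothetical helper, not part of this module's API) of
# ``bind_compound_bounding_box``. The selector setup below is an assumption:
# the value of the ``x`` input selects which interval applies to ``y``, and
# ``x`` itself is ignored by each box (the ``True`` flag in ``selector_args``).
def _example_bind_compound_bounding_box():
    from astropy.modeling.models import Polynomial2D

    p = Polynomial2D(degree=1)
    bounding_boxes = {(0,): (-1, 1), (1,): (-2, 2)}  # keyed by selector value
    bind_compound_bounding_box(p, bounding_boxes, selector_args=[("x", True)])
    return p.bounding_box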
def custom_model(*args, fit_deriv=None):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
The model is separable only if there is a single input.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
The standard settable model properties can be configured by default
using keyword arguments matching the name of the property; however,
these values are not set as model "parameters". Moreover, users
cannot use keyword arguments matching non-settable model properties,
with the exception of ``n_outputs`` which should be set to the number of
outputs of your function.
Parameters
----------
func : function
Function which defines the model. It should take N positional
        arguments where ``N`` is the number of dimensions of the model (the
        number of independent variables in the model), and any number of
        keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25)
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
f"{__name__} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any)."
)
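# Illustrative sketch (hypothetical helper, not part of this module's API) of the
# ``n_outputs`` behaviour described in the docstring above: a keyword argument
# named ``n_outputs`` with a default value sets the number of model outputs
# instead of becoming a model parameter.
def _example_custom_model_n_outputs():
    @custom_model
    def TwoOutput(x, shift=0.0, n_outputs=2):
        return x + shift, x - shift

    m = TwoOutput(shift=1.0)
    return m.n_outputs, m(3.0)  # -> (2, (4.0, 2.0))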
def _custom_model_inputs(func):
"""
Processes the inputs to the `custom_model`'s function into the appropriate
categories.
Parameters
----------
func : callable
Returns
-------
inputs : list
list of evaluation inputs
special_params : dict
dictionary of model properties which require special treatment
settable_params : dict
dictionary of defaults for settable model properties
params : dict
dictionary of model parameters set by `custom_model`'s function
"""
inputs, parameters = get_inputs_and_params(func)
special = ["n_outputs"]
settable = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is not None
]
properties = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is None and attr not in special
]
special_params = {}
settable_params = {}
params = {}
for param in parameters:
if param.name in special:
special_params[param.name] = param.default
elif param.name in settable:
settable_params[param.name] = param.default
elif param.name in properties:
raise ValueError(
f"Parameter '{param.name}' cannot be a model property: {properties}."
)
else:
params[param.name] = param.default
return inputs, special_params, settable_params, params
def _custom_model_wrapper(func, fit_deriv=None):
"""
    Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable object"
)
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other callable object"
)
model_name = func.__name__
inputs, special_params, settable_params, params = _custom_model_inputs(func)
if fit_deriv is not None and len(fit_deriv.__defaults__) != len(params):
raise ModelDefinitionError(
"derivative function should accept same number of parameters as func."
)
params = {
param: Parameter(param, default=default) for param, default in params.items()
}
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
members = {
"__module__": str(modname),
"__doc__": func.__doc__,
"n_inputs": len(inputs),
"n_outputs": special_params.pop("n_outputs", 1),
"evaluate": staticmethod(func),
"_settable_properties": settable_params,
}
if fit_deriv is not None:
members["fit_deriv"] = staticmethod(fit_deriv)
members.update(params)
cls = type(model_name, (FittableModel,), members)
cls._separable = len(inputs) == 1
return cls
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``arr[coords] == arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from
``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = model.bounding_box
    if (coords is None) and (arr is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or arr must be input.")
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError(
"number of array dimensions inconsistent with number of model inputs."
)
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError(
"coordinate length inconsistent with the number of model inputs."
)
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError("coordinate shape inconsistent with the array shape.")
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = pos, delta = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input"
" arr in one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
def hide_inverse(model):
"""
This is a convenience function intended to disable automatic generation
of the inverse in compound models by disabling one of the constituent
model's inverse. This is to handle cases where user provided inverse
functions are not compatible within an expression.
Example:
compound_model.inverse = hide_inverse(m1) + m2 + m3
    This will ensure that the defined inverse itself won't attempt to
    build its own inverse, which would otherwise fail in this example
    (e.g., m = m1 + m2 + m3 happens to raise an exception for this
    reason).
Note that this permanently disables it. To prevent that either copy
the model or restore the inverse later.
"""
del model.inverse
return model
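# Illustrative sketch (hypothetical helper, not part of this module's API)
# mirroring the ``hide_inverse`` docstring pattern. The models m1, m2, m3 are
# assumed to have compatible inverses; ``m1.copy()`` is used so the original m1
# keeps its inverse, as the docstring recommends.
def _example_hide_inverse(m1, m2, m3):
    compound = m1 + m2 + m3
    compound.inverse = hide_inverse(m1.copy()) + m2 + m3
    return compound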
|
52431158dc705e6d8d2c88900dd087e498148b09d04d414879c82fbd3134bbbb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Convolution Model."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np
from .core import CompoundModel
class Convolution(CompoundModel):
"""
Wrapper class for a convolution model.
Parameters
----------
    operator : tuple
The SPECIAL_OPERATORS entry for the convolution being used.
model : Model
The model for the convolution.
    kernel : Model
The kernel model for the convolution.
bounding_box : tuple
A bounding box to define the limits of the integration
approximation for the convolution.
resolution : float
The resolution for the approximation of the convolution.
cache : bool, optional
Allow convolution computation to be cached for reuse. This is
enabled by default.
Notes
-----
    This wrapper is necessary to handle the limitations of the
    pseudospectral convolution binary operator implemented in
    astropy.convolution under `~astropy.convolution.convolve_fft`. In
    `~astropy.convolution.convolve_fft` it is assumed that the inputs ``array``
and ``kernel`` span a sufficient portion of the support of the functions of
    the convolution. Consequently, the compound model created by the
`~astropy.convolution.convolve_models` function makes the assumption that
one should pass an input array that sufficiently spans this space. This means
that slightly different input arrays to this model will result in different
outputs, even on points of intersection between these arrays.
This issue is solved by requiring a ``bounding_box`` together with a
resolution so that one can pre-calculate the entire domain and then
(by default) cache the convolution values. The function then just
interpolates the results from this cache.
"""
def __init__(self, operator, model, kernel, bounding_box, resolution, cache=True):
super().__init__(operator, model, kernel)
self.bounding_box = bounding_box
self._resolution = resolution
self._cache_convolution = cache
self._kwargs = None
self._convolution = None
def clear_cache(self):
"""
Clears the cached convolution.
"""
self._kwargs = None
self._convolution = None
def _get_convolution(self, **kwargs):
if (self._convolution is None) or (self._kwargs != kwargs):
domain = self.bounding_box.domain(self._resolution)
mesh = np.meshgrid(*domain)
data = super().__call__(*mesh, **kwargs)
from scipy.interpolate import RegularGridInterpolator
convolution = RegularGridInterpolator(domain, data)
if self._cache_convolution:
self._kwargs = kwargs
self._convolution = convolution
else:
convolution = self._convolution
return convolution
@staticmethod
def _convolution_inputs(*args):
not_scalar = np.where([not np.isscalar(arg) for arg in args])[0]
if len(not_scalar) == 0:
return np.array(args), (1,)
else:
output_shape = args[not_scalar[0]].shape
if not all(args[index].shape == output_shape for index in not_scalar):
raise ValueError("Values have differing shapes")
inputs = []
for arg in args:
if np.isscalar(arg):
inputs.append(np.full(output_shape, arg))
else:
inputs.append(arg)
return np.reshape(inputs, (len(inputs), -1)).T, output_shape
@staticmethod
def _convolution_outputs(outputs, output_shape):
return outputs.reshape(output_shape)
def __call__(self, *args, **kw):
inputs, output_shape = self._convolution_inputs(*args)
convolution = self._get_convolution(**kw)
outputs = convolution(inputs)
return self._convolution_outputs(outputs, output_shape)
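# Illustrative sketch (hypothetical helper, not part of this module's API) of how
# a Convolution model is normally obtained via
# astropy.convolution.convolve_models_fft; the models, bounding box and
# resolution below are assumptions, and scipy is required for the interpolation.
def _example_convolution():
    from astropy.convolution import convolve_models_fft
    from astropy.modeling.models import Box1D, Gaussian1D

    model = Box1D(amplitude=1.0, x_0=0.0, width=1.0)
    kernel = Gaussian1D(amplitude=1.0, mean=0.0, stddev=0.2)
    # arguments: (model, kernel, bounding_box, resolution)
    conv = convolve_models_fft(model, kernel, (-3, 3), 0.01)
    return conv(np.linspace(-2, 2, 5))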
|
0e1ecfa4eb55a4ebf8063c7779facb72c07afa4a4f287dae027641257deaa259 | """
Special models useful for complex compound models where control is needed over
which outputs from a source model are mapped to which inputs of a target model.
"""
# pylint: disable=invalid-name
from astropy.units import Quantity
from .core import FittableModel, Model
__all__ = ["Mapping", "Identity", "UnitsMapping"]
class Mapping(FittableModel):
"""
Allows inputs to be reordered, duplicated or dropped.
Parameters
----------
mapping : tuple
A tuple of integers representing indices of the inputs to this model
to return and in what order to return them. See
:ref:`astropy:compound-model-mappings` for more details.
n_inputs : int
Number of inputs; if `None` (default) then ``max(mapping) + 1`` is
used (i.e. the highest input index used in the mapping).
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Raises
------
TypeError
        Raised when the number of inputs is less than ``max(mapping)``.
Examples
--------
>>> from astropy.modeling.models import Polynomial2D, Shift, Mapping
>>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
>>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1)
>>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2)
>>> model(1, 2) # doctest: +FLOAT_CMP
(17.0, 14.2)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, mapping, n_inputs=None, name=None, meta=None):
self._inputs = ()
self._outputs = ()
if n_inputs is None:
self._n_inputs = max(mapping) + 1
else:
self._n_inputs = n_inputs
self._n_outputs = len(mapping)
super().__init__(name=name, meta=meta)
self.inputs = tuple("x" + str(idx) for idx in range(self._n_inputs))
self.outputs = tuple("x" + str(idx) for idx in range(self._n_outputs))
self._mapping = mapping
self._input_units_strict = {key: False for key in self._inputs}
self._input_units_allow_dimensionless = {key: False for key in self._inputs}
@property
def n_inputs(self):
return self._n_inputs
@property
def n_outputs(self):
return self._n_outputs
@property
def mapping(self):
"""Integers representing indices of the inputs."""
return self._mapping
def __repr__(self):
if self.name is None:
return f"<Mapping({self.mapping})>"
return f"<Mapping({self.mapping}, name={self.name!r})>"
def evaluate(self, *args):
if len(args) != self.n_inputs:
name = self.name if self.name is not None else "Mapping"
raise TypeError(f"{name} expects {self.n_inputs} inputs; got {len(args)}")
result = tuple(args[idx] for idx in self._mapping)
if self.n_outputs == 1:
return result[0]
return result
@property
def inverse(self):
"""
A `Mapping` representing the inverse of the current mapping.
Raises
------
`NotImplementedError`
            An inverse does not exist for mappings that drop some of their inputs
(there is then no way to reconstruct the inputs that were dropped).
"""
try:
mapping = tuple(self.mapping.index(idx) for idx in range(self.n_inputs))
except ValueError:
raise NotImplementedError(
f"Mappings such as {self.mapping} that drop one or more of their inputs"
" are not invertible at this time."
)
inv = self.__class__(mapping)
inv._inputs = self._outputs
inv._outputs = self._inputs
inv._n_inputs = len(inv._inputs)
inv._n_outputs = len(inv._outputs)
return inv
class Identity(Mapping):
"""
Returns inputs unchanged.
This class is useful in compound models when some of the inputs must be
passed unchanged to the next model.
Parameters
----------
n_inputs : int
Specifies the number of inputs this identity model accepts.
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Examples
--------
Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs::
>>> from astropy.modeling.models import (Polynomial1D, Shift, Scale,
... Identity)
>>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2)
>>> model(1,1) # doctest: +FLOAT_CMP
(2.4, 2.0)
>>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP
(1.0, 1.0)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, n_inputs, name=None, meta=None):
mapping = tuple(range(n_inputs))
super().__init__(mapping, name=name, meta=meta)
def __repr__(self):
if self.name is None:
return f"<Identity({self.n_inputs})>"
return f"<Identity({self.n_inputs}, name={self.name!r})>"
@property
def inverse(self):
"""
The inverse transformation.
In this case of `Identity`, ``self.inverse is self``.
"""
return self
class UnitsMapping(Model):
"""
Mapper that operates on the units of the input, first converting to
canonical units, then assigning new units without further conversion.
Used by Model.coerce_units to support units on otherwise unitless models
such as Polynomial1D.
Parameters
----------
mapping : tuple
A tuple of (input_unit, output_unit) pairs, one per input, matched to the
        inputs by position. The first element of each pair is the unit that
the model will accept (specify ``dimensionless_unscaled``
to accept dimensionless input). The second element is the unit that the
model will return. Specify ``dimensionless_unscaled``
to return dimensionless Quantity, and `None` to return raw values without
Quantity.
input_units_equivalencies : dict, optional
Default equivalencies to apply to input values. If set, this should be a
dictionary where each key is a string that corresponds to one of the
model inputs.
input_units_allow_dimensionless : dict or bool, optional
Allow dimensionless input. If this is True, input values to evaluate will
gain the units specified in input_units. If this is a dictionary then it
should map input name to a bool to allow dimensionless numbers for that
input.
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like, optional
Free-form metadata to associate with this model.
Examples
--------
Wrapping a unitless model to require and convert units:
>>> from astropy.modeling.models import Polynomial1D, UnitsMapping
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = UnitsMapping(((u.m, None),)) | poly
>>> model = model | UnitsMapping(((None, u.s),))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP
<Quantity 1.2 s>
Wrapping a unitless model but still permitting unitless input:
>>> from astropy.modeling.models import Polynomial1D, UnitsMapping
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = UnitsMapping(((u.m, None),), input_units_allow_dimensionless=True) | poly
>>> model = model | UnitsMapping(((None, u.s),))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(10) # doctest: +FLOAT_CMP
<Quantity 21. s>
"""
def __init__(
self,
mapping,
input_units_equivalencies=None,
input_units_allow_dimensionless=False,
name=None,
meta=None,
):
self._mapping = mapping
none_mapping_count = len([m for m in mapping if m[-1] is None])
if none_mapping_count > 0 and none_mapping_count != len(mapping):
raise ValueError("If one return unit is None, then all must be None")
# These attributes are read and handled by Model
self._input_units_strict = True
self.input_units_equivalencies = input_units_equivalencies
self._input_units_allow_dimensionless = input_units_allow_dimensionless
super().__init__(name=name, meta=meta)
# Can't invoke this until after super().__init__, since
# we need self.inputs and self.outputs to be populated.
self._rebuild_units()
def _rebuild_units(self):
self._input_units = {
input_name: input_unit
for input_name, (input_unit, _) in zip(self.inputs, self.mapping)
}
@property
def n_inputs(self):
return len(self._mapping)
@property
def n_outputs(self):
return len(self._mapping)
@property
def inputs(self):
return super().inputs
@inputs.setter
def inputs(self, value):
super(UnitsMapping, self.__class__).inputs.fset(self, value)
self._rebuild_units()
@property
def outputs(self):
return super().outputs
@outputs.setter
def outputs(self, value):
super(UnitsMapping, self.__class__).outputs.fset(self, value)
self._rebuild_units()
@property
def input_units(self):
return self._input_units
@property
def mapping(self):
return self._mapping
def evaluate(self, *args):
result = []
for arg, (_, return_unit) in zip(args, self.mapping):
if isinstance(arg, Quantity):
value = arg.value
else:
value = arg
if return_unit is None:
result.append(value)
else:
result.append(Quantity(value, return_unit, subok=True))
if self.n_outputs == 1:
return result[0]
else:
return tuple(result)
def __repr__(self):
if self.name is None:
return f"<UnitsMapping({self.mapping})>"
else:
return f"<UnitsMapping({self.mapping}, name={self.name!r})>"
|
696b20a61c235b09d4df9e6203150685af7370fdbf49270a7335361d95935447 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Spline models and fitters."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import abc
import functools
import warnings
import numpy as np
from astropy.utils import isiterable
from astropy.utils.exceptions import AstropyUserWarning
from .core import FittableModel, ModelDefinitionError
from .parameters import Parameter
__all__ = [
"Spline1D",
"SplineInterpolateFitter",
"SplineSmoothingFitter",
"SplineExactKnotsFitter",
"SplineSplrepFitter",
]
__doctest_requires__ = {"Spline1D": ["scipy"]}
class _Spline(FittableModel):
"""Base class for spline models."""
_knot_names = ()
_coeff_names = ()
optional_inputs = {}
def __init__(
self,
knots=None,
coeffs=None,
degree=None,
bounds=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
):
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta
)
self._user_knots = False
self._init_tck(degree)
# Hack to allow an optional model argument
self._create_optional_inputs()
if knots is not None:
self._init_spline(knots, coeffs, bounds)
elif coeffs is not None:
raise ValueError(
"If one passes a coeffs vector one needs to also pass knots!"
)
@property
def param_names(self):
"""
Coefficient names generated based on the spline's degree and
number of knots.
"""
return tuple(list(self._knot_names) + list(self._coeff_names))
@staticmethod
def _optional_arg(arg):
return f"_{arg}"
def _create_optional_inputs(self):
for arg in self.optional_inputs:
attribute = self._optional_arg(arg)
if hasattr(self, attribute):
raise ValueError(
f"Optional argument {arg} already exists in this class!"
)
else:
setattr(self, attribute, None)
def _intercept_optional_inputs(self, **kwargs):
new_kwargs = kwargs
for arg in self.optional_inputs:
if arg in kwargs:
attribute = self._optional_arg(arg)
if getattr(self, attribute) is None:
setattr(self, attribute, kwargs[arg])
del new_kwargs[arg]
else:
raise RuntimeError(
f"{arg} has already been set, something has gone wrong!"
)
return new_kwargs
def evaluate(self, *args, **kwargs):
"""Extract the optional kwargs passed to call."""
optional_inputs = kwargs
for arg in self.optional_inputs:
attribute = self._optional_arg(arg)
if arg in kwargs:
# Options passed in
optional_inputs[arg] = kwargs[arg]
elif getattr(self, attribute) is not None:
# No options passed in and Options set
optional_inputs[arg] = getattr(self, attribute)
setattr(self, attribute, None)
else:
# No options passed in and No options set
optional_inputs[arg] = self.optional_inputs[arg]
return optional_inputs
def __call__(self, *args, **kwargs):
"""
Make model callable to model evaluation.
"""
# Hack to allow an optional model argument
kwargs = self._intercept_optional_inputs(**kwargs)
return super().__call__(*args, **kwargs)
def _create_parameter(self, name: str, index: int, attr: str, fixed=False):
"""
Create a spline parameter linked to an attribute array.
Parameters
----------
name : str
Name for the parameter
index : int
The index of the parameter in the array
attr : str
The name for the attribute array
fixed : optional, bool
If the parameter should be fixed or not
"""
# Hack to allow parameters and attribute array to freely exchange values
# _getter forces reading value from attribute array
# _setter forces setting value to attribute array
def _getter(value, model: "_Spline", index: int, attr: str):
return getattr(model, attr)[index]
def _setter(value, model: "_Spline", index: int, attr: str):
getattr(model, attr)[index] = value
return value
getter = functools.partial(_getter, index=index, attr=attr)
setter = functools.partial(_setter, index=index, attr=attr)
default = getattr(self, attr)
param = Parameter(
name=name, default=default[index], fixed=fixed, getter=getter, setter=setter
)
# setter/getter wrapper for parameters in this case require the
# parameter to have a reference back to its parent model
param.model = self
param.value = default[index]
# Add parameter to model
self.__dict__[name] = param
def _create_parameters(self, base_name: str, attr: str, fixed=False):
"""
        Create spline parameters linked to an attribute array for all
elements in that array.
Parameters
----------
base_name : str
Base name for the parameters
attr : str
The name for the attribute array
fixed : optional, bool
If the parameters should be fixed or not
"""
names = []
for index in range(len(getattr(self, attr))):
name = f"{base_name}{index}"
names.append(name)
self._create_parameter(name, index, attr, fixed)
return tuple(names)
@abc.abstractmethod
def _init_parameters(self):
raise NotImplementedError("This needs to be implemented")
@abc.abstractmethod
def _init_data(self, knots, coeffs, bounds=None):
raise NotImplementedError("This needs to be implemented")
def _init_spline(self, knots, coeffs, bounds=None):
self._init_data(knots, coeffs, bounds)
self._init_parameters()
# fill _parameters and related attributes
self._initialize_parameters((), {})
self._initialize_slices()
# Calling this will properly fill the _parameter vector, which is
# used directly sometimes without being properly filled.
_ = self.parameters
def _init_tck(self, degree):
self._c = None
self._t = None
self._degree = degree
class Spline1D(_Spline):
"""
One dimensional Spline Model.
Parameters
----------
knots : optional
Define the knots for the spline. Can be 1) the number of interior
knots for the spline, 2) the array of all knots for the spline, or
3) If both bounds are defined, the interior knots for the spline
coeffs : optional
The array of knot coefficients for the spline
degree : optional
The degree of the spline. It must be 1 <= degree <= 5, default is 3.
bounds : optional
The upper and lower bounds of the spline.
Notes
-----
Much of the functionality of this model is provided by
`scipy.interpolate.BSpline` which can be directly accessed via the
bspline property.
Fitting for this model is provided by wrappers for:
`scipy.interpolate.UnivariateSpline`,
`scipy.interpolate.InterpolatedUnivariateSpline`,
and `scipy.interpolate.LSQUnivariateSpline`.
If one fails to define any knots/coefficients, no parameters will
be added to this model until a fitter is called. This is because
some of the fitters for splines vary the number of parameters and so
we cannot define the parameter set until after fitting in these cases.
Since parameters are not necessarily known at model initialization,
setting model parameters directly via the model interface has been
disabled.
Direct constructors are provided for this model which incorporate the
fitting to data directly into model construction.
Knot parameters are declared as "fixed" parameters by default to
    enable other `astropy.modeling` fitters to be used to fit this
    model.
Examples
--------
>>> import numpy as np
>>> from astropy.modeling.models import Spline1D
>>> from astropy.modeling import fitting
>>> np.random.seed(42)
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> xs = np.linspace(-3, 3, 1000)
A 1D interpolating spline can be fit to data:
>>> fitter = fitting.SplineInterpolateFitter()
>>> spl = fitter(Spline1D(), x, y)
Similarly, a smoothing spline can be fit to data:
>>> fitter = fitting.SplineSmoothingFitter()
>>> spl = fitter(Spline1D(), x, y, s=0.5)
Similarly, a spline can be fit to data using an exact set of interior knots:
>>> t = [-1, 0, 1]
>>> fitter = fitting.SplineExactKnotsFitter()
>>> spl = fitter(Spline1D(), x, y, t=t)
"""
n_inputs = 1
n_outputs = 1
_separable = True
optional_inputs = {"nu": 0}
def __init__(
self,
knots=None,
coeffs=None,
degree=3,
bounds=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
):
super().__init__(
knots=knots,
coeffs=coeffs,
degree=degree,
bounds=bounds,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
)
@property
def t(self):
"""
The knots vector.
"""
if self._t is None:
return np.concatenate(
(np.zeros(self._degree + 1), np.ones(self._degree + 1))
)
else:
return self._t
@t.setter
def t(self, value):
if self._t is None:
raise ValueError(
"The model parameters must be initialized before setting knots."
)
elif len(value) == len(self._t):
self._t = value
else:
raise ValueError(
"There must be exactly as many knots as previously defined."
)
@property
def t_interior(self):
"""
The interior knots.
"""
return self.t[self.degree + 1 : -(self.degree + 1)]
@property
def c(self):
"""
The coefficients vector.
"""
if self._c is None:
return np.zeros(len(self.t))
else:
return self._c
@c.setter
def c(self, value):
if self._c is None:
raise ValueError(
"The model parameters must be initialized before setting coeffs."
)
elif len(value) == len(self._c):
self._c = value
else:
raise ValueError(
"There must be exactly as many coeffs as previously defined."
)
@property
def degree(self):
"""
The degree of the spline polynomials.
"""
return self._degree
@property
def _initialized(self):
return self._t is not None and self._c is not None
@property
def tck(self):
"""
Scipy 'tck' tuple representation.
"""
return (self.t, self.c, self.degree)
@tck.setter
def tck(self, value):
if self._initialized:
if value[2] != self.degree:
raise ValueError("tck has incompatible degree!")
self.t = value[0]
self.c = value[1]
else:
self._init_spline(value[0], value[1])
# Calling this will properly fill the _parameter vector, which is
# used directly sometimes without being properly filled.
_ = self.parameters
@property
def bspline(self):
"""
Scipy bspline object representation.
"""
from scipy.interpolate import BSpline
return BSpline(*self.tck)
@bspline.setter
def bspline(self, value):
from scipy.interpolate import BSpline
if isinstance(value, BSpline):
self.tck = value.tck
else:
self.tck = value
@property
def knots(self):
"""
Dictionary of knot parameters.
"""
return [getattr(self, knot) for knot in self._knot_names]
@property
def user_knots(self):
"""If the knots have been supplied by the user."""
return self._user_knots
@user_knots.setter
def user_knots(self, value):
self._user_knots = value
@property
def coeffs(self):
"""
Dictionary of coefficient parameters.
"""
return [getattr(self, coeff) for coeff in self._coeff_names]
def _init_parameters(self):
self._knot_names = self._create_parameters("knot", "t", fixed=True)
self._coeff_names = self._create_parameters("coeff", "c")
def _init_bounds(self, bounds=None):
if bounds is None:
bounds = [None, None]
if bounds[0] is None:
lower = np.zeros(self._degree + 1)
else:
lower = np.array([bounds[0]] * (self._degree + 1))
if bounds[1] is None:
upper = np.ones(self._degree + 1)
else:
upper = np.array([bounds[1]] * (self._degree + 1))
if bounds[0] is not None and bounds[1] is not None:
self.bounding_box = bounds
has_bounds = True
else:
has_bounds = False
return has_bounds, lower, upper
def _init_knots(self, knots, has_bounds, lower, upper):
if np.issubdtype(type(knots), np.integer):
self._t = np.concatenate((lower, np.zeros(knots), upper))
elif isiterable(knots):
self._user_knots = True
if has_bounds:
self._t = np.concatenate((lower, np.array(knots), upper))
else:
if len(knots) < 2 * (self._degree + 1):
raise ValueError(
f"Must have at least {2*(self._degree + 1)} knots."
)
self._t = np.array(knots)
else:
raise ValueError(f"Knots: {knots} must be iterable or value")
# check that knots form a viable spline
self.bspline
def _init_coeffs(self, coeffs=None):
if coeffs is None:
self._c = np.zeros(len(self._t))
else:
self._c = np.array(coeffs)
# check that coeffs form a viable spline
self.bspline
def _init_data(self, knots, coeffs, bounds=None):
self._init_knots(knots, *self._init_bounds(bounds))
self._init_coeffs(coeffs)
def evaluate(self, *args, **kwargs):
"""
Evaluate the spline.
Parameters
----------
x :
            (positional) The points at which to evaluate the spline.
nu : optional
(kwarg) The derivative of the spline for evaluation, 0 <= nu <= degree + 1.
Default: 0.
"""
kwargs = super().evaluate(*args, **kwargs)
x = args[0]
if "nu" in kwargs:
if kwargs["nu"] > self.degree + 1:
raise RuntimeError(
"Cannot evaluate a derivative of "
f"order higher than {self.degree + 1}"
)
return self.bspline(x, **kwargs)
def derivative(self, nu=1):
"""
Create a spline that is the derivative of this one.
Parameters
----------
nu : int, optional
Derivative order, default is 1.
"""
if nu <= self.degree:
bspline = self.bspline.derivative(nu=nu)
derivative = Spline1D(degree=bspline.k)
derivative.bspline = bspline
return derivative
else:
raise ValueError(f"Must have nu <= {self.degree}")
def antiderivative(self, nu=1):
"""
Create a spline that is an antiderivative of this one.
Parameters
----------
nu : int, optional
Antiderivative order, default is 1.
Notes
-----
        Assumes the constant of integration is 0.
"""
if (nu + self.degree) <= 5:
bspline = self.bspline.antiderivative(nu=nu)
antiderivative = Spline1D(degree=bspline.k)
antiderivative.bspline = bspline
return antiderivative
else:
raise ValueError(
"Supported splines can have max degree 5, "
f"antiderivative degree will be {nu + self.degree}"
)
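# Illustrative sketch (hypothetical helper, not part of this module's API) of the
# ``derivative`` method and the ``nu`` optional input; the data and the use of an
# interpolating fit are assumptions chosen for demonstration.
def _example_spline_derivative():
    x = np.linspace(-3, 3, 50)
    y = np.exp(-(x**2))
    spl = SplineInterpolateFitter()(Spline1D(), x, y)
    dspl = spl.derivative(nu=1)       # a new Spline1D of reduced degree
    return dspl(0.0), spl(0.0, nu=1)  # equivalent ways to evaluate dy/dx at x=0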
class _SplineFitter(abc.ABC):
"""
Base Spline Fitter.
"""
def __init__(self):
self.fit_info = {"resid": None, "spline": None}
def _set_fit_info(self, spline):
self.fit_info["resid"] = spline.get_residual()
self.fit_info["spline"] = spline
@abc.abstractmethod
def _fit_method(self, model, x, y, **kwargs):
raise NotImplementedError("This has not been implemented for _SplineFitter.")
def __call__(self, model, x, y, z=None, **kwargs):
model_copy = model.copy()
if isinstance(model_copy, Spline1D):
if z is not None:
raise ValueError("1D model can only have 2 data points.")
spline = self._fit_method(model_copy, x, y, **kwargs)
else:
raise ModelDefinitionError(
"Only spline models are compatible with this fitter."
)
self._set_fit_info(spline)
return model_copy
class SplineInterpolateFitter(_SplineFitter):
"""
Fit an interpolating spline.
"""
def _fit_method(self, model, x, y, **kwargs):
weights = kwargs.pop("weights", None)
bbox = kwargs.pop("bbox", [None, None])
if model.user_knots:
warnings.warn(
"The current user specified knots maybe ignored for interpolating data",
AstropyUserWarning,
)
model.user_knots = False
if bbox != [None, None]:
model.bounding_box = bbox
from scipy.interpolate import InterpolatedUnivariateSpline
spline = InterpolatedUnivariateSpline(
x, y, w=weights, bbox=bbox, k=model.degree
)
model.tck = spline._eval_args
return spline
class SplineSmoothingFitter(_SplineFitter):
"""
Fit a smoothing spline.
"""
def _fit_method(self, model, x, y, **kwargs):
s = kwargs.pop("s", None)
weights = kwargs.pop("weights", None)
bbox = kwargs.pop("bbox", [None, None])
if model.user_knots:
warnings.warn(
"The current user specified knots maybe ignored for smoothing data",
AstropyUserWarning,
)
model.user_knots = False
if bbox != [None, None]:
model.bounding_box = bbox
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(x, y, w=weights, bbox=bbox, k=model.degree, s=s)
model.tck = spline._eval_args
return spline
class SplineExactKnotsFitter(_SplineFitter):
"""
Fit a spline using least-squares regression.
"""
def _fit_method(self, model, x, y, **kwargs):
t = kwargs.pop("t", None)
weights = kwargs.pop("weights", None)
bbox = kwargs.pop("bbox", [None, None])
if t is not None:
if model.user_knots:
warnings.warn(
"The current user specified knots will be "
"overwritten for by knots passed into this function",
AstropyUserWarning,
)
else:
if model.user_knots:
t = model.t_interior
else:
raise RuntimeError("No knots have been provided")
if bbox != [None, None]:
model.bounding_box = bbox
from scipy.interpolate import LSQUnivariateSpline
spline = LSQUnivariateSpline(x, y, t, w=weights, bbox=bbox, k=model.degree)
model.tck = spline._eval_args
return spline
class SplineSplrepFitter(_SplineFitter):
"""
Fit a spline using the `scipy.interpolate.splrep` function interface.
"""
def __init__(self):
super().__init__()
self.fit_info = {"fp": None, "ier": None, "msg": None}
def _fit_method(self, model, x, y, **kwargs):
t = kwargs.pop("t", None)
s = kwargs.pop("s", None)
task = kwargs.pop("task", 0)
weights = kwargs.pop("weights", None)
bbox = kwargs.pop("bbox", [None, None])
if t is not None:
if model.user_knots:
warnings.warn(
"The current user specified knots will be "
"overwritten for by knots passed into this function",
AstropyUserWarning,
)
else:
if model.user_knots:
t = model.t_interior
if bbox != [None, None]:
model.bounding_box = bbox
from scipy.interpolate import splrep
tck, fp, ier, msg = splrep(
x,
y,
w=weights,
xb=bbox[0],
xe=bbox[1],
k=model.degree,
s=s,
t=t,
task=task,
full_output=1,
)
model.tck = tck
return fp, ier, msg
def _set_fit_info(self, spline):
self.fit_info["fp"] = spline[0]
self.fit_info["ier"] = spline[1]
self.fit_info["msg"] = spline[2]
|
a00e634df69915e87d366cde6825f4f100438fcc453216871d1721ae5928a53a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
Optimization algorithms used in `~astropy.modeling.fitting`.
"""
import abc
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["Optimization", "SLSQP", "Simplex"]
# Maximum number of iterations
DEFAULT_MAXITER = 100
# Step for the forward difference approximation of the Jacobian
DEFAULT_EPS = np.sqrt(np.finfo(float).eps)
# Default requested accuracy
DEFAULT_ACC = 1e-07
DEFAULT_BOUNDS = (-(10**12), 10**12)
class Optimization(metaclass=abc.ABCMeta):
"""
Base class for optimizers.
Parameters
----------
opt_method : callable
Implements optimization method
Notes
-----
The base Optimizer does not support any constraints by default; individual
optimizers should explicitly set this list to the specific constraints
they support.
"""
supported_constraints = []
def __init__(self, opt_method):
self._opt_method = opt_method
self._maxiter = DEFAULT_MAXITER
self._eps = DEFAULT_EPS
self._acc = DEFAULT_ACC
@property
def maxiter(self):
"""Maximum number of iterations."""
return self._maxiter
@maxiter.setter
def maxiter(self, val):
"""Set maxiter."""
self._maxiter = val
@property
def eps(self):
"""Step for the forward difference approximation of the Jacobian."""
return self._eps
@eps.setter
def eps(self, val):
"""Set eps value."""
self._eps = val
@property
def acc(self):
"""Requested accuracy."""
return self._acc
@acc.setter
def acc(self, val):
"""Set accuracy."""
self._acc = val
def __repr__(self):
fmt = f"{self.__class__.__name__}()"
return fmt
@property
def opt_method(self):
"""Return the optimization method."""
return self._opt_method
@abc.abstractmethod
def __call__(self):
raise NotImplementedError("Subclasses should implement this method")
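# Hedged sketch (not part of astropy): the minimal shape of an Optimization
# subclass. The Powell routine, the constraint list, and the fit_info layout
# below are assumptions chosen only to illustrate the base-class API.
class _ExamplePowellOptimization(Optimization):
    supported_constraints = ["fixed", "tied"]
    def __init__(self):
        from scipy.optimize import fmin_powell
        super().__init__(fmin_powell)
        self.fit_info = {"final_func_val": None}
    def __call__(self, objfunc, initval, fargs, **kwargs):
        kwargs.setdefault("maxiter", self._maxiter)
        # fmin_powell with full_output=True returns
        # (xopt, fopt, direc, iter, funcalls, warnflag).
        fitparams, final_func_val, *_ = self.opt_method(
            objfunc, initval, args=fargs, full_output=True, disp=0, **kwargs
        )
        self.fit_info["final_func_val"] = final_func_val
        return fitparams, self.fit_info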
class SLSQP(Optimization):
"""
Sequential Least Squares Programming optimization algorithm.
The algorithm is described in [1]_. It supports tied and fixed
parameters, as well as bounded constraints. Uses
`scipy.optimize.fmin_slsqp`.
References
----------
.. [1] http://www.netlib.org/toms/733
"""
supported_constraints = ["bounds", "eqcons", "ineqcons", "fixed", "tied"]
def __init__(self):
from scipy.optimize import fmin_slsqp
super().__init__(fmin_slsqp)
self.fit_info = {
"final_func_val": None,
"numiter": None,
"exit_mode": None,
"message": None,
}
def __call__(self, objfunc, initval, fargs, **kwargs):
"""
Run the solver.
Parameters
----------
objfunc : callable
objective function
initval : iterable
initial guess for the parameter values
fargs : tuple
other arguments to be passed to the statistic function
kwargs : dict
other keyword arguments to be passed to the solver
"""
kwargs["iter"] = kwargs.pop("maxiter", self._maxiter)
if "epsilon" not in kwargs:
kwargs["epsilon"] = self._eps
if "acc" not in kwargs:
kwargs["acc"] = self._acc
# Get the verbosity level
disp = kwargs.pop("verblevel", None)
# set the values of constraints to match the requirements of fmin_slsqp
model = fargs[0]
pars = [getattr(model, name) for name in model.param_names]
bounds = [par.bounds for par in pars if not (par.fixed or par.tied)]
bounds = np.asarray(bounds)
for i in bounds:
if i[0] is None:
i[0] = DEFAULT_BOUNDS[0]
if i[1] is None:
i[1] = DEFAULT_BOUNDS[1]
# older versions of scipy require this array to be float
bounds = np.asarray(bounds, dtype=float)
eqcons = np.array(model.eqcons)
ineqcons = np.array(model.ineqcons)
fitparams, final_func_val, numiter, exit_mode, mess = self.opt_method(
objfunc,
initval,
args=fargs,
full_output=True,
disp=disp,
bounds=bounds,
eqcons=eqcons,
ieqcons=ineqcons,
**kwargs,
)
self.fit_info["final_func_val"] = final_func_val
self.fit_info["numiter"] = numiter
self.fit_info["exit_mode"] = exit_mode
self.fit_info["message"] = mess
if exit_mode != 0:
warnings.warn(
"The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning,
)
return fitparams, self.fit_info
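# Hedged usage sketch (not library code): in astropy's public API this
# optimizer is normally reached through `astropy.modeling.fitting.SLSQPLSQFitter`.
# The Gaussian model, the bound on ``mean``, and the data below are assumptions
# made for this example; the import is deferred to avoid a circular import.
def _example_slsqp_fit():
    import numpy as np
    from astropy.modeling import fitting, models
    x = np.linspace(-5, 5, 100)
    y = 3 * np.exp(-0.5 * (x - 1.2) ** 2 / 0.9**2)
    init = models.Gaussian1D(amplitude=2, mean=0, stddev=1, bounds={"mean": (-2, 2)})
    fitter = fitting.SLSQPLSQFitter()
    return fitter(init, x, y)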
class Simplex(Optimization):
"""
Nelder-Mead (downhill simplex) algorithm.
This algorithm [1]_ only uses function values, not derivatives.
Uses `scipy.optimize.fmin`.
References
----------
.. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
"""
supported_constraints = ["bounds", "fixed", "tied"]
def __init__(self):
from scipy.optimize import fmin as simplex
super().__init__(simplex)
self.fit_info = {
"final_func_val": None,
"numiter": None,
"exit_mode": None,
"num_function_calls": None,
}
def __call__(self, objfunc, initval, fargs, **kwargs):
"""
Run the solver.
Parameters
----------
objfunc : callable
objective function
initval : iterable
initial guess for the parameter values
fargs : tuple
other arguments to be passed to the statistic function
kwargs : dict
other keyword arguments to be passed to the solver
"""
if "maxiter" not in kwargs:
kwargs["maxiter"] = self._maxiter
if "acc" in kwargs:
self._acc = kwargs["acc"]
kwargs.pop("acc")
if "xtol" in kwargs:
self._acc = kwargs["xtol"]
kwargs.pop("xtol")
# Get the verbosity level
disp = kwargs.pop("verblevel", None)
fitparams, final_func_val, numiter, funcalls, exit_mode = self.opt_method(
objfunc,
initval,
args=fargs,
xtol=self._acc,
disp=disp,
full_output=True,
**kwargs,
)
self.fit_info["final_func_val"] = final_func_val
self.fit_info["numiter"] = numiter
self.fit_info["exit_mode"] = exit_mode
self.fit_info["num_function_calls"] = funcalls
if self.fit_info["exit_mode"] == 1:
warnings.warn(
"The fit may be unsuccessful; "
"Maximum number of function evaluations reached.",
AstropyUserWarning,
)
elif self.fit_info["exit_mode"] == 2:
warnings.warn(
"The fit may be unsuccessful; Maximum number of iterations reached.",
AstropyUserWarning,
)
return fitparams, self.fit_info
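# Hedged usage sketch (not library code): the Simplex optimizer above is
# normally reached through `astropy.modeling.fitting.SimplexLSQFitter`. The
# linear model and data below are assumptions made for this example.
def _example_simplex_fit():
    import numpy as np
    from astropy.modeling import fitting, models
    x = np.linspace(0, 10, 50)
    y = 2.0 * x + 1.0
    fitter = fitting.SimplexLSQFitter()
    return fitter(models.Linear1D(slope=1, intercept=0), x, y)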
|
89a053799c5d19c3052c6615ca80c25498cd6af082212889c128de00ab0a10f6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Implements rotations, including spherical rotations as defined in WCS Paper II [1]_.
`RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in
WCS Paper II to rotate to/from a native sphere and the celestial sphere.
The implementation uses `EulerAngleRotation`. The model parameters are
three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point
in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial
pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat``
and ``-(lon_pole-90)``.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
# pylint: disable=invalid-name, too-many-arguments, no-member
import math
from functools import reduce
import numpy as np
from astropy import units as u
from astropy.coordinates.matrix_utilities import rotation_matrix
from .core import Model
from .parameters import Parameter
from .utils import _to_orig_unit, _to_radian
__all__ = [
"RotateCelestial2Native",
"RotateNative2Celestial",
"Rotation2D",
"EulerAngleRotation",
"RotationSequence3D",
"SphericalRotationSequence",
]
def _create_matrix(angles, axes_order):
matrices = []
for angle, axis in zip(angles, axes_order):
if isinstance(angle, u.Quantity):
angle = angle.value
angle = angle.item()
matrices.append(rotation_matrix(angle, axis, unit=u.rad))
return reduce(np.matmul, matrices[::-1])
def spherical2cartesian(alpha, delta):
alpha = np.deg2rad(alpha)
delta = np.deg2rad(delta)
x = np.cos(alpha) * np.cos(delta)
y = np.cos(delta) * np.sin(alpha)
z = np.sin(delta)
return np.array([x, y, z])
def cartesian2spherical(x, y, z):
h = np.hypot(x, y)
alpha = np.rad2deg(np.arctan2(y, x))
delta = np.rad2deg(np.arctan2(z, h))
return alpha, delta
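# Worked example (illustrative): the two helpers above are inverses of each
# other for angles given in degrees.
def _example_spherical_round_trip():
    xyz = spherical2cartesian(30.0, 45.0)  # unit vector for lon=30 deg, lat=45 deg
    return cartesian2spherical(*xyz)  # ~ (30.0, 45.0)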
class RotationSequence3D(Model):
"""
Perform a series of rotations about different axes in 3D space.
Positive angles represent a counter-clockwise rotation.
Parameters
----------
angles : array-like
Angles of rotation in deg in the order of axes_order.
axes_order : str
A sequence of 'x', 'y', 'z' corresponding to axis of rotation.
Examples
--------
>>> model = RotationSequence3D([1.1, 2.1, 3.1, 4.1], axes_order='xyzx')
"""
standard_broadcasting = False
_separable = False
n_inputs = 3
n_outputs = 3
angles = Parameter(
default=[],
getter=_to_orig_unit,
setter=_to_radian,
description="Angles of rotation in deg in the order of axes_order",
)
def __init__(self, angles, axes_order, name=None):
self.axes = ["x", "y", "z"]
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError(
f"Unrecognized axis label {unrecognized}; should be one of {self.axes} "
)
self.axes_order = axes_order
if len(angles) != len(axes_order):
raise ValueError(
f"The number of angles {len(angles)} should match "
f"the number of axes {len(axes_order)}."
)
super().__init__(angles, name=name)
self._inputs = ("x", "y", "z")
self._outputs = ("x", "y", "z")
@property
def inverse(self):
"""Inverse rotation."""
angles = self.angles.value[::-1] * -1
return self.__class__(angles, axes_order=self.axes_order[::-1])
def evaluate(self, x, y, z, angles):
"""
Apply the rotation to a set of 3D Cartesian coordinates.
"""
if x.shape != y.shape or x.shape != z.shape:
raise ValueError("Expected input arrays to have the same shape")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.flatten(), y.flatten(), z.flatten()])
result = np.dot(_create_matrix(angles[0], self.axes_order), inarr)
x, y, z = result[0], result[1], result[2]
x.shape = y.shape = z.shape = orig_shape
return x, y, z
class SphericalRotationSequence(RotationSequence3D):
"""
Perform a sequence of rotations about arbitrary number of axes
in spherical coordinates.
Parameters
----------
angles : list
A sequence of angles (in deg).
axes_order : str
A sequence of characters ('x', 'y', or 'z') corresponding to the
axis of rotation and matching the order in ``angles``.
"""
def __init__(self, angles, axes_order, name=None, **kwargs):
self._n_inputs = 2
self._n_outputs = 2
super().__init__(angles, axes_order=axes_order, name=name, **kwargs)
self._inputs = ("lon", "lat")
self._outputs = ("lon", "lat")
@property
def n_inputs(self):
return self._n_inputs
@property
def n_outputs(self):
return self._n_outputs
def evaluate(self, lon, lat, angles):
x, y, z = spherical2cartesian(lon, lat)
x1, y1, z1 = super().evaluate(x, y, z, angles)
lon, lat = cartesian2spherical(x1, y1, z1)
return lon, lat
class _EulerRotation:
"""
Base class which does the actual computation.
"""
_separable = False
def evaluate(self, alpha, delta, phi, theta, psi, axes_order):
shape = None
if isinstance(alpha, np.ndarray):
alpha = alpha.flatten()
delta = delta.flatten()
shape = alpha.shape
inp = spherical2cartesian(alpha, delta)
matrix = _create_matrix([phi, theta, psi], axes_order)
result = np.dot(matrix, inp)
a, b = cartesian2spherical(*result)
if shape is not None:
a.shape = shape
b.shape = shape
return a, b
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
class EulerAngleRotation(_EulerRotation, Model):
"""
Implements Euler angle intrinsic rotations.
Rotates one coordinate system into another (fixed) coordinate system.
All coordinate systems are right-handed. The sign of the angles is
determined by the right-hand rule.
Parameters
----------
phi, theta, psi : float or `~astropy.units.Quantity` ['angle']
"proper" Euler angles in deg.
If floats, they should be in deg.
axes_order : str
A 3 character string, a combination of 'x', 'y' and 'z',
where each character denotes an axis in 3D space.
"""
n_inputs = 2
n_outputs = 2
phi = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="1st Euler angle (Quantity or value in deg)",
)
theta = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="2nd Euler angle (Quantity or value in deg)",
)
psi = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="3rd Euler angle (Quantity or value in deg)",
)
def __init__(self, phi, theta, psi, axes_order, **kwargs):
self.axes = ["x", "y", "z"]
if len(axes_order) != 3:
raise TypeError(
"Expected axes_order to be a character sequence of length 3, "
f"got {axes_order}"
)
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError(
f"Unrecognized axis label {unrecognized}; should be one of {self.axes}"
)
self.axes_order = axes_order
qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]]
if any(qs) and not all(qs):
raise TypeError(
"All parameters should be of the same type - float or Quantity."
)
super().__init__(phi=phi, theta=theta, psi=psi, **kwargs)
self._inputs = ("alpha", "delta")
self._outputs = ("alpha", "delta")
@property
def inverse(self):
return self.__class__(
phi=-self.psi,
theta=-self.theta,
psi=-self.phi,
axes_order=self.axes_order[::-1],
)
def evaluate(self, alpha, delta, phi, theta, psi):
a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order)
return a, b
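# Illustrative usage sketch: rotate a point on the sphere and undo the rotation
# with the model's inverse. The Euler angles below are arbitrary example values.
def _example_euler_rotation():
    rot = EulerAngleRotation(23.0, 14.0, 2.3, axes_order="zxz")
    alpha, delta = rot(120.0, 30.0)  # rotated coordinates, in deg
    return rot.inverse(alpha, delta)  # ~ (120.0, 30.0) up to round-off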
class _SkyRotation(_EulerRotation, Model):
"""
Base class for RotateNative2Celestial and RotateCelestial2Native.
"""
lon = Parameter(
default=0, getter=_to_orig_unit, setter=_to_radian, description="Longitude"
)
lat = Parameter(
default=0, getter=_to_orig_unit, setter=_to_radian, description="Latitude"
)
lon_pole = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="Longitude of a pole",
)
def __init__(self, lon, lat, lon_pole, **kwargs):
qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]]
if any(qs) and not all(qs):
raise TypeError(
"All parameters should be of the same type - float or Quantity."
)
super().__init__(lon, lat, lon_pole, **kwargs)
self.axes_order = "zxz"
def _evaluate(self, phi, theta, lon, lat, lon_pole):
alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole, self.axes_order)
mask = alpha < 0
if isinstance(mask, np.ndarray):
alpha[mask] += 360
else:
alpha += 360
return alpha, delta
class RotateNative2Celestial(_SkyRotation):
"""
Transform from Native to Celestial Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity` ['angle']
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity` ['angle']
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity` ['angle']
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they
should be in units of deg. Inputs are angles on the native sphere.
Outputs are angles on the celestial sphere.
"""
n_inputs = 2
n_outputs = 2
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
self.inputs = ("phi_N", "theta_N")
self.outputs = ("alpha_C", "delta_C")
def evaluate(self, phi_N, theta_N, lon, lat, lon_pole):
"""
Parameters
----------
phi_N, theta_N : float or `~astropy.units.Quantity` ['angle']
Angles in the Native coordinate system.
If float, assumed in degrees.
lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle']
Parameter values when the model was initialized.
If float, assumed in degrees.
Returns
-------
alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle']
Angles on the Celestial sphere.
If float, in degrees.
"""
# The values are in radians since they have already been through the setter.
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = lon_pole - np.pi / 2
theta = -(np.pi / 2 - lat)
psi = -(np.pi / 2 + lon)
alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi)
return alpha_C, delta_C
@property
def inverse(self):
# convert to angles on the celestial sphere
return RotateCelestial2Native(self.lon, self.lat, self.lon_pole)
class RotateCelestial2Native(_SkyRotation):
"""
Transform from Celestial to Native Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity` ['angle']
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity` ['angle']
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity` ['angle']
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be
in units of deg. Inputs are angles on the celestial sphere.
Outputs are angles on the native sphere.
"""
n_inputs = 2
n_outputs = 2
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
# Inputs are angles on the celestial sphere
self.inputs = ("alpha_C", "delta_C")
# Outputs are angles on the native sphere
self.outputs = ("phi_N", "theta_N")
def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole):
"""
Parameters
----------
alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle']
Angles in the Celestial coordinate frame.
If float, assumed in degrees.
lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle']
Parameter values when the model was initialized.
If float, assumed in degrees.
Returns
-------
phi_N, theta_N : float or `~astropy.units.Quantity` ['angle']
Angles on the Native sphere.
If float, in degrees.
"""
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = np.pi / 2 + lon
theta = np.pi / 2 - lat
psi = -(lon_pole - np.pi / 2)
phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi)
return phi_N, theta_N
@property
def inverse(self):
return RotateNative2Celestial(self.lon, self.lat, self.lon_pole)
class Rotation2D(Model):
"""
Perform a 2D rotation given an angle.
Positive angles represent a counter-clockwise rotation; negative angles, a clockwise rotation.
Parameters
----------
angle : float or `~astropy.units.Quantity` ['angle']
Angle of rotation (if float it should be in deg).
"""
n_inputs = 2
n_outputs = 2
_separable = False
angle = Parameter(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Angle of rotation (Quantity or value in deg)",
)
def __init__(self, angle=angle, **kwargs):
super().__init__(angle=angle, **kwargs)
self._inputs = ("x", "y")
self._outputs = ("x", "y")
@property
def inverse(self):
"""Inverse rotation."""
return self.__class__(angle=-self.angle)
@classmethod
def evaluate(cls, x, y, angle):
"""
Rotate (x, y) about ``angle``.
Parameters
----------
x, y : array-like
Input quantities
angle : float or `~astropy.units.Quantity` ['angle']
Angle of rotations.
If float, assumed in degrees.
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
# If one argument has units, enforce they both have units and they are compatible.
x_unit = getattr(x, "unit", None)
y_unit = getattr(y, "unit", None)
has_units = x_unit is not None and y_unit is not None
if x_unit != y_unit:
if has_units and y_unit.is_equivalent(x_unit):
y = y.to(x_unit)
y_unit = x_unit
else:
raise u.UnitsError("x and y must have compatible units")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.flatten(), y.flatten()])
if isinstance(angle, u.Quantity):
angle = angle.to_value(u.rad)
result = np.dot(cls._compute_matrix(angle), inarr)
x, y = result[0], result[1]
x.shape = y.shape = orig_shape
if has_units:
return u.Quantity(x, unit=x_unit, subok=True), u.Quantity(
y, unit=y_unit, subok=True
)
return x, y
@staticmethod
def _compute_matrix(angle):
return np.array(
[[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]],
dtype=np.float64,
)
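# Illustrative usage sketch: a 90 degree counter-clockwise rotation maps the
# point (1, 0) to approximately (0, 1).
def _example_rotation2d():
    rot = Rotation2D(angle=90.0)
    return rot(1.0, 0.0)  # ~ (0.0, 1.0), up to floating-point round-off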
|
1ec925bc67072c57c25905b8e1f576046f10eca08e2839bb970363d1584a8314 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Functions to determine if a model is separable, i.e.
if the model outputs are independent.
It analyzes ``n_inputs``, ``n_outputs`` and the operators
in a compound model by stepping through the transforms
and creating a ``coord_matrix`` of shape (``n_outputs``, ``n_inputs``).
Each modeling operator is represented by a function which
takes two simple models (or two ``coord_matrix`` arrays) and
returns an array of shape (``n_outputs``, ``n_inputs``).
"""
import numpy as np
from .core import CompoundModel, Model, ModelDefinitionError
from .mappings import Mapping
__all__ = ["is_separable", "separability_matrix"]
def is_separable(transform):
"""
A separability test for the outputs of a transform.
Parameters
----------
transform : `~astropy.modeling.core.Model`
A (compound) model.
Returns
-------
is_separable : ndarray
A boolean array with size ``transform.n_outputs`` where
each element indicates whether the output is independent
and the result of a separable transform.
Examples
--------
>>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D
>>> is_separable(Shift(1) & Shift(2) | Scale(1) & Scale(2))
array([ True, True]...)
>>> is_separable(Shift(1) & Shift(2) | Rotation2D(2))
array([False, False]...)
>>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \
Polynomial2D(1) & Polynomial2D(2))
array([False, False]...)
>>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]))
array([ True, True, True, True]...)
"""
if transform.n_inputs == 1 and transform.n_outputs > 1:
is_separable = np.array([False] * transform.n_outputs).T
return is_separable
separable_matrix = _separable(transform)
is_separable = separable_matrix.sum(1)
is_separable = np.where(is_separable != 1, False, True)
return is_separable
def separability_matrix(transform):
"""
Compute the correlation between outputs and inputs.
Parameters
----------
transform : `~astropy.modeling.core.Model`
A (compound) model.
Returns
-------
separable_matrix : ndarray
A boolean correlation matrix of shape (n_outputs, n_inputs).
Indicates the dependence of outputs on inputs. For completely
independent outputs, the diagonal elements are True and
off-diagonal elements are False.
Examples
--------
>>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D
>>> separability_matrix(Shift(1) & Shift(2) | Scale(1) & Scale(2))
array([[ True, False], [False, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Rotation2D(2))
array([[ True, True], [ True, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \
Polynomial2D(1) & Polynomial2D(2))
array([[ True, True], [ True, True]]...)
>>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]))
array([[ True, False], [False, True], [ True, False], [False, True]]...)
"""
if transform.n_inputs == 1 and transform.n_outputs > 1:
return np.ones((transform.n_outputs, transform.n_inputs), dtype=np.bool_)
separable_matrix = _separable(transform)
separable_matrix = np.where(separable_matrix != 0, True, False)
return separable_matrix
def _compute_n_outputs(left, right):
"""
Compute the number of outputs of two models.
The two models are the left and right model to an operation in
the expression tree of a compound model.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If an input is an array, it is the output of `coord_matrix`.
"""
if isinstance(left, Model):
lnout = left.n_outputs
else:
lnout = left.shape[0]
if isinstance(right, Model):
rnout = right.n_outputs
else:
rnout = right.shape[0]
noutp = lnout + rnout
return noutp
def _arith_oper(left, right):
"""
Function corresponding to one of the arithmetic operators
['+', '-', '*', '/', '**'].
This always returns a nonseparable output.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If an input is an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation.
"""
# models have the same number of inputs and outputs
def _n_inputs_outputs(input):
if isinstance(input, Model):
n_outputs, n_inputs = input.n_outputs, input.n_inputs
else:
n_outputs, n_inputs = input.shape
return n_inputs, n_outputs
left_inputs, left_outputs = _n_inputs_outputs(left)
right_inputs, right_outputs = _n_inputs_outputs(right)
if left_inputs != right_inputs or left_outputs != right_outputs:
raise ModelDefinitionError(
"Unsupported operands for arithmetic operator: left"
f" (n_inputs={left_inputs}, n_outputs={left_outputs}) and right"
f" (n_inputs={right_inputs}, n_outputs={right_outputs}); models must have"
" the same n_inputs and the same n_outputs for this operator."
)
result = np.ones((left_outputs, left_inputs))
return result
def _coord_matrix(model, pos, noutp):
"""
Create an array representing inputs and outputs of a simple model.
The array has a shape (noutp, model.n_inputs).
Parameters
----------
model : `astropy.modeling.Model`
model
pos : str
Position of this model in the expression tree.
One of ['left', 'right'].
noutp : int
Number of outputs of the compound model of which the input model
is a left or right child.
"""
if isinstance(model, Mapping):
axes = []
for i in model.mapping:
axis = np.zeros((model.n_inputs,))
axis[i] = 1
axes.append(axis)
m = np.vstack(axes)
mat = np.zeros((noutp, model.n_inputs))
if pos == "left":
mat[: model.n_outputs, : model.n_inputs] = m
else:
mat[-model.n_outputs :, -model.n_inputs :] = m
return mat
if not model.separable:
# this does not work for more than 2 coordinates
mat = np.zeros((noutp, model.n_inputs))
if pos == "left":
mat[: model.n_outputs, : model.n_inputs] = 1
else:
mat[-model.n_outputs :, -model.n_inputs :] = 1
else:
mat = np.zeros((noutp, model.n_inputs))
for i in range(model.n_inputs):
mat[i, i] = 1
if pos == "right":
mat = np.roll(mat, (noutp - model.n_outputs))
return mat
def _cstack(left, right):
"""
Function corresponding to '&' operation.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If an input is an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation.
"""
noutp = _compute_n_outputs(left, right)
if isinstance(left, Model):
cleft = _coord_matrix(left, "left", noutp)
else:
cleft = np.zeros((noutp, left.shape[1]))
cleft[: left.shape[0], : left.shape[1]] = left
if isinstance(right, Model):
cright = _coord_matrix(right, "right", noutp)
else:
cright = np.zeros((noutp, right.shape[1]))
cright[-right.shape[0] :, -right.shape[1] :] = right
return np.hstack([cleft, cright])
def _cdot(left, right):
"""
Function corresponding to "|" operation.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If an input is an array, it is the output of `coord_matrix`.
Returns
-------
result : ndarray
Result from this operation.
"""
left, right = right, left
def _n_inputs_outputs(input, position):
"""
Return ``n_inputs``, ``n_outputs`` for a model or coord_matrix.
"""
if isinstance(input, Model):
coords = _coord_matrix(input, position, input.n_outputs)
else:
coords = input
return coords
cleft = _n_inputs_outputs(left, "left")
cright = _n_inputs_outputs(right, "right")
try:
result = np.dot(cleft, cright)
except ValueError:
raise ModelDefinitionError(
'Models cannot be combined with the "|" operator; '
f"left coord_matrix is {cright}, right coord_matrix is {cleft}"
)
return result
def _separable(transform):
"""
Calculate the separability of outputs.
Parameters
----------
transform : `astropy.modeling.Model`
A transform (usually a compound model).
Returns
-------
is_separable : ndarray of dtype np.bool
An array of shape (transform.n_outputs,) of boolean type.
Each element represents the separability of the corresponding output.
"""
if (
transform_matrix := transform._calculate_separability_matrix()
) is not NotImplemented:
return transform_matrix
elif isinstance(transform, CompoundModel):
sepleft = _separable(transform.left)
sepright = _separable(transform.right)
return _operators[transform.op](sepleft, sepright)
elif isinstance(transform, Model):
return _coord_matrix(transform, "left", transform.n_outputs)
# Maps modeling operators to functions that compute the relationship of axes,
# represented as an array of 0s and 1s.
_operators = {
"&": _cstack,
"|": _cdot,
"+": _arith_oper,
"-": _arith_oper,
"*": _arith_oper,
"/": _arith_oper,
"**": _arith_oper,
}
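# Illustrative usage sketch: two independent shifts remain separable, while
# piping them through a 2D rotation couples every output to every input.
# Shift and Rotation2D come from `astropy.modeling.models`.
def _example_separability():
    from astropy.modeling import models
    uncoupled = models.Shift(1) & models.Shift(2)
    coupled = uncoupled | models.Rotation2D(30)
    return (
        separability_matrix(uncoupled),  # [[True, False], [False, True]]
        separability_matrix(coupled),  # [[True, True], [True, True]]
    )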
|
7cd894427f2c8b7259119c34a3579b2157aa7677a1079a9d2c25649d7af162a5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides utility functions for the models package.
"""
import warnings
# pylint: disable=invalid-name
from collections import UserDict
from collections.abc import MutableMapping
from inspect import signature
import numpy as np
from astropy import units as u
from astropy.utils.decorators import deprecated
__doctest_skip__ = ["AliasDict"]
__all__ = ["AliasDict", "poly_map_domain", "comb", "ellipse_extent"]
deprecation_msg = """
AliasDict is deprecated because it no longer serves a function anywhere
inside astropy.
"""
@deprecated("5.0", deprecation_msg)
class AliasDict(MutableMapping):
"""
Creates a `dict` like object that wraps an existing `dict` or other
`MutableMapping`, along with a `dict` of *key aliases* that translate
between specific keys in this dict to different keys in the underlying
dict.
In other words, keys that do not have an associated alias are accessed and
stored like a normal `dict`. However, a key that has an alias is accessed
and stored to the "parent" dict via the alias.
Parameters
----------
parent : dict-like
The parent `dict` that aliased keys are accessed from and stored to.
aliases : dict-like
Maps keys in this dict to their associated keys in the parent dict.
Examples
--------
>>> parent = {'a': 1, 'b': 2, 'c': 3}
>>> aliases = {'foo': 'a', 'bar': 'c'}
>>> alias_dict = AliasDict(parent, aliases)
>>> alias_dict['foo']
1
>>> alias_dict['bar']
3
Keys in the original parent dict are not visible if they were not
aliased:
>>> alias_dict['b']
Traceback (most recent call last):
...
KeyError: 'b'
Likewise, updates to aliased keys are reflected back in the parent dict:
>>> alias_dict['foo'] = 42
>>> alias_dict['foo']
42
>>> parent['a']
42
However, updates/insertions to keys that are *not* aliased are not
reflected in the parent dict:
>>> alias_dict['qux'] = 99
>>> alias_dict['qux']
99
>>> 'qux' in parent
False
In particular, updates on the `AliasDict` to a key that is equal to
one of the aliased keys in the parent dict does *not* update the parent
dict. For example, ``alias_dict`` aliases ``'foo'`` to ``'a'``. But
assigning to a key ``'a'`` on the `AliasDict` does not impact the
parent:
>>> alias_dict['a'] = 'nope'
>>> alias_dict['a']
'nope'
>>> parent['a']
42
"""
_store_type = dict
"""
Subclasses may override this to use other mapping types as the underlying
storage, for example an `OrderedDict`. However, even in this case
additional work may be needed to get things like the ordering right.
"""
def __init__(self, parent, aliases):
self._parent = parent
self._store = self._store_type()
self._aliases = dict(aliases)
def __getitem__(self, key):
if key in self._aliases:
try:
return self._parent[self._aliases[key]]
except KeyError:
raise KeyError(key)
return self._store[key]
def __setitem__(self, key, value):
if key in self._aliases:
self._parent[self._aliases[key]] = value
else:
self._store[key] = value
def __delitem__(self, key):
if key in self._aliases:
try:
del self._parent[self._aliases[key]]
except KeyError:
raise KeyError(key)
else:
del self._store[key]
def __iter__(self):
"""
First iterates over keys from the parent dict (if the aliased keys are
present in the parent), followed by any keys in the local store.
"""
for key, alias in self._aliases.items():
if alias in self._parent:
yield key
for key in self._store:
yield key
def __len__(self):
return len(list(iter(self)))
def __repr__(self):
# repr() just like any other dict--this should look transparent
store_copy = self._store_type()
for key, alias in self._aliases.items():
if alias in self._parent:
store_copy[key] = self._parent[alias]
store_copy.update(self._store)
return repr(store_copy)
def make_binary_operator_eval(oper, f, g):
"""
Given a binary operator (as a callable of two arguments) ``oper`` and
two callables ``f`` and ``g`` which accept the same arguments,
returns a *new* function that takes the same arguments as ``f`` and ``g``,
but passes the outputs of ``f`` and ``g`` to the given ``oper``.
``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The
given operator is applied element-wise to the tuple outputs.
Example
-------
>>> from operator import add
>>> def prod(x, y):
... return (x * y,)
...
>>> sum_of_prod = make_binary_operator_eval(add, prod, prod)
>>> sum_of_prod(3, 5)
(30,)
"""
return lambda inputs, params: tuple(
oper(x, y) for x, y in zip(f(inputs, params), g(inputs, params))
)
def poly_map_domain(oldx, domain, window):
"""
Map domain into window by shifting and scaling.
Parameters
----------
oldx : array
original coordinates
domain : list or tuple of length 2
function domain
window : list or tuple of length 2
range into which to map the domain
"""
domain = np.array(domain, dtype=np.float64)
window = np.array(window, dtype=np.float64)
if domain.shape != (2,) or window.shape != (2,):
raise ValueError('Expected "domain" and "window" to be a tuple of size 2.')
scl = (window[1] - window[0]) / (domain[1] - domain[0])
off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0])
return off + scl * oldx
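# Worked example (illustrative): mapping the domain [1, 3] onto the window
# [-1, 1] gives scl = 1 and off = -2, so the points 1, 2, 3 become -1, 0, 1.
def _example_poly_map_domain():
    return poly_map_domain(np.array([1.0, 2.0, 3.0]), [1, 3], [-1, 1])  # [-1., 0., 1.]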
def _validate_domain_window(value):
if value is not None:
if np.asanyarray(value).shape != (2,):
raise ValueError("domain and window should be tuples of size 2.")
return tuple(value)
return value
@deprecated("5.3", alternative="math.comb")
def comb(N, k):
"""
The number of combinations of N things taken k at a time.
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
"""
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in range(min(k, N - k)):
val = (val * (N - j)) / (j + 1)
return val
def array_repr_oneline(array):
"""
Represents a multi-dimensional Numpy array flattened onto a single line.
"""
r = np.array2string(array, separator=", ", suppress_small=True)
return " ".join(line.strip() for line in r.splitlines())
def combine_labels(left, right):
"""
For use with the join operator &: Combine left input/output labels with
right input/output labels.
If none of the labels conflict then this just returns a sum of tuples.
However if *any* of the labels conflict, this appends '0' to the left-hand
labels and '1' to the right-hand labels so there is no ambiguity.
"""
if set(left).intersection(right):
left = tuple(label + "0" for label in left)
right = tuple(label + "1" for label in right)
return left + right
def ellipse_extent(a, b, theta):
"""
Calculates the half size of a box encapsulating a rotated 2D
ellipse.
Parameters
----------
a : float or `~astropy.units.Quantity`
The ellipse semimajor axis.
b : float or `~astropy.units.Quantity`
The ellipse semiminor axis.
theta : float or `~astropy.units.Quantity` ['angle']
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or
a value in radians (as a float). The rotation angle increases
counterclockwise.
Returns
-------
offsets : tuple
The absolute value of the offset distances from the ellipse center that
define its bounding box region, ``(dx, dy)``.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Ellipse2D
from astropy.modeling.utils import ellipse_extent, render_model
amplitude = 1
x0 = 50
y0 = 50
a = 30
b = 10
theta = np.pi / 4
model = Ellipse2D(amplitude, x0, y0, a, b, theta)
dx, dy = ellipse_extent(a, b, theta)
limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy]
model.bounding_box = limits
image = render_model(model)
plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5,
extent = limits)
plt.show()
"""
from .parameters import Parameter # prevent circular import
if isinstance(theta, Parameter):
if theta.quantity is None:
theta = theta.value
else:
theta = theta.quantity
t = np.arctan2(-b * np.tan(theta), a)
dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta)
t = np.arctan2(b, a * np.tan(theta))
dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta)
if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity):
return np.abs(u.Quantity([dx, dy], subok=True))
return np.abs([dx, dy])
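# Worked example (illustrative): with no rotation the bounding half-widths of
# the ellipse reduce to the semi-axes themselves.
def _example_ellipse_extent():
    return ellipse_extent(2.0, 1.0, 0.0)  # array([2., 1.])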
def get_inputs_and_params(func):
"""
Given a callable, determine the input variables and the
parameters.
Parameters
----------
func : callable
Returns
-------
inputs, params : tuple
Each entry is a list of inspect.Parameter objects
"""
sig = signature(func)
inputs = []
params = []
for param in sig.parameters.values():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("Signature must not have *args or **kwargs")
if param.default == param.empty:
inputs.append(param)
else:
params.append(param)
return inputs, params
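# Illustrative usage sketch: arguments without defaults are treated as model
# inputs and arguments with defaults as parameters. The function below is an
# assumption made for this example.
def _example_get_inputs_and_params():
    def tilted_line(x, slope=1.0, intercept=0.0):
        return slope * x + intercept
    inputs, params = get_inputs_and_params(tilted_line)
    return [p.name for p in inputs], [p.name for p in params]
    # (['x'], ['slope', 'intercept'])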
def _combine_equivalency_dict(keys, eq1=None, eq2=None):
# Given two dictionaries that give equivalencies for a set of keys, for
# example input value names, return a dictionary that includes all the
# equivalencies
eq = {}
for key in keys:
eq[key] = []
if eq1 is not None and key in eq1:
eq[key].extend(eq1[key])
if eq2 is not None and key in eq2:
eq[key].extend(eq2[key])
return eq
def _to_radian(value):
"""Convert ``value`` to radian."""
if isinstance(value, u.Quantity):
return value.to(u.rad)
return np.deg2rad(value)
def _to_orig_unit(value, raw_unit=None, orig_unit=None):
"""Convert value with ``raw_unit`` to ``orig_unit``."""
if raw_unit is not None:
return (value * raw_unit).to(orig_unit)
return np.rad2deg(value)
class _ConstraintsDict(UserDict):
"""
Wrapper around UserDict to allow updating the constraints
on a Parameter when the dictionary is updated.
"""
def __init__(self, model, constraint_type):
self._model = model
self.constraint_type = constraint_type
c = {}
for name in model.param_names:
param = getattr(model, name)
c[name] = getattr(param, constraint_type)
super().__init__(c)
def __setitem__(self, key, val):
super().__setitem__(key, val)
param = getattr(self._model, key)
setattr(param, self.constraint_type, val)
class _SpecialOperatorsDict(UserDict):
"""
Wrapper around UserDict to allow for better tracking of the Special
Operators for CompoundModels. This dictionary is structured so that
one cannot inadvertently overwrite an existing special operator.
Parameters
----------
unique_id: int
the last used unique_id for a SPECIAL OPERATOR
special_operators: dict
a dictionary containing the special_operators
Notes
-----
Direct setting of operators (`dict[key] = value`) into the
dictionary has been deprecated in favor of the `.add(name, value)`
method, so that unique dictionary keys can be generated and tracked
consistently.
"""
def __init__(self, unique_id=0, special_operators={}):
super().__init__(special_operators)
self._unique_id = unique_id
def _set_value(self, key, val):
if key in self:
raise ValueError(f'Special operator "{key}" already exists')
else:
super().__setitem__(key, val)
def __setitem__(self, key, val):
self._set_value(key, val)
warnings.warn(
DeprecationWarning(
"""
Special operator dictionary assignment has been deprecated.
Please use `.add` instead, so that you can capture a unique
key for your operator.
"""
)
)
def _get_unique_id(self):
self._unique_id += 1
return self._unique_id
def add(self, operator_name, operator):
"""
Adds a special operator to the dictionary, and then returns the
unique key that the operator is stored under for later reference.
Parameters
----------
operator_name: str
the name for the operator
operator: function
the actual operator function which will be used
Returns
-------
the unique operator key for the dictionary
`(operator_name, unique_id)`
"""
key = (operator_name, self._get_unique_id())
self._set_value(key, operator)
return key
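# Illustrative usage sketch: operators are registered through `.add`, which
# returns the generated (name, unique_id) key used to look the operator up later.
def _example_special_operators():
    import operator
    ops = _SpecialOperatorsDict()
    key = ops.add("my_op", operator.add)  # key == ("my_op", 1)
    return ops[key] is operator.add  # True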
|
4ed31f4cb40b84a1faf2ed58e8f323ede862465d6227b78de4e44df88659938b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains an improved bounding box.
"""
import abc
import copy
import warnings
from collections import namedtuple
from typing import Any, Callable, Dict, List, Tuple
import numpy as np
from astropy.units import Quantity
from astropy.utils import isiterable
__all__ = ["ModelBoundingBox", "CompoundBoundingBox"]
_BaseInterval = namedtuple("_BaseInterval", "lower upper")
class _Interval(_BaseInterval):
"""
A single input's bounding box interval.
Parameters
----------
lower : float
The lower bound of the interval
upper : float
The upper bound of the interval
Methods
-------
validate :
Constructs a valid interval
outside :
Determine which parts of an input array are outside the interval.
domain :
Constructs a discretization of the points inside the interval.
"""
def __repr__(self):
return f"Interval(lower={self.lower}, upper={self.upper})"
def copy(self):
return copy.deepcopy(self)
@staticmethod
def _validate_shape(interval):
"""Validate the shape of an interval representation."""
MESSAGE = """An interval must be some sort of sequence of length 2"""
try:
shape = np.shape(interval)
except TypeError:
try:
# np.shape does not work with lists of Quantities
if len(interval) == 1:
interval = interval[0]
shape = np.shape([b.to_value() for b in interval])
except (ValueError, TypeError, AttributeError):
raise ValueError(MESSAGE)
valid_shape = shape in ((2,), (1, 2), (2, 0))
if not valid_shape:
valid_shape = (
len(shape) > 0
and shape[0] == 2
and all(isinstance(b, np.ndarray) for b in interval)
)
if not isiterable(interval) or not valid_shape:
raise ValueError(MESSAGE)
@classmethod
def _validate_bounds(cls, lower, upper):
"""Validate the bounds are reasonable and construct an interval from them."""
if (np.asanyarray(lower) > np.asanyarray(upper)).all():
warnings.warn(
f"Invalid interval: upper bound {upper} "
f"is strictly less than lower bound {lower}.",
RuntimeWarning,
)
return cls(lower, upper)
@classmethod
def validate(cls, interval):
"""
Construct and validate an interval.
Parameters
----------
interval : iterable
A representation of the interval.
Returns
-------
A validated interval.
"""
cls._validate_shape(interval)
if len(interval) == 1:
interval = tuple(interval[0])
else:
interval = tuple(interval)
return cls._validate_bounds(interval[0], interval[1])
def outside(self, _input: np.ndarray):
"""
Parameters
----------
_input : np.ndarray
The evaluation input in the form of an array.
Returns
-------
Boolean array indicating which parts of _input are outside the interval:
True -> position outside interval
False -> position inside interval
"""
return np.logical_or(_input < self.lower, _input > self.upper)
def domain(self, resolution):
return np.arange(self.lower, self.upper + resolution, resolution)
# The interval where all ignored inputs can be found.
_ignored_interval = _Interval.validate((-np.inf, np.inf))
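# Illustrative usage sketch of the interval helper above: validate a
# (lower, upper) pair, test which points fall outside it, and build a
# discretized domain.
def _example_interval():
    interval = _Interval.validate((1.0, 5.0))
    outside = interval.outside(np.array([0.0, 3.0, 6.0]))  # [True, False, True]
    grid = interval.domain(1.0)  # array([1., 2., 3., 4., 5.])
    return outside, grid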
def get_index(model, key) -> int:
"""
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself.
"""
if isinstance(key, str):
if key in model.inputs:
index = model.inputs.index(key)
else:
raise ValueError(f"'{key}' is not one of the inputs: {model.inputs}.")
elif np.issubdtype(type(key), np.integer):
if 0 <= key < len(model.inputs):
index = key
else:
raise IndexError(
f"Integer key: {key} must be non-negative and < {len(model.inputs)}."
)
else:
raise ValueError(f"Key value: {key} must be string or integer.")
return index
def get_name(model, index: int):
"""Get the input name corresponding to the input index."""
return model.inputs[index]
class _BoundingDomain(abc.ABC):
"""
Base class for ModelBoundingBox and CompoundBoundingBox.
This is where all the `~astropy.modeling.core.Model` evaluation
code for evaluating with a bounding box is because it is common
to both types of bounding box.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this bounding domain is for.
prepare_inputs :
Generates the necessary input information so that model can
be evaluated only for input points entirely inside bounding_box.
This needs to be implemented by a subclass. Note that most of
the implementation is in ModelBoundingBox.
prepare_outputs :
Fills the output values in for any input points outside the
bounding_box.
evaluate :
Performs a complete model evaluation while enforcing the bounds
on the inputs and returns a complete output.
"""
def __init__(self, model, ignored: List[int] = None, order: str = "C"):
self._model = model
self._ignored = self._validate_ignored(ignored)
self._order = self._get_order(order)
@property
def model(self):
return self._model
@property
def order(self) -> str:
return self._order
@property
def ignored(self) -> List[int]:
return self._ignored
def _get_order(self, order: str = None) -> str:
"""
Determine whether the bounding_box is C/Python ordered or
Fortran/mathematically ordered.
"""
if order is None:
order = self._order
if order not in ("C", "F"):
raise ValueError(
"order must be either 'C' (C/python order) or "
f"'F' (Fortran/mathematical order), got: {order}."
)
return order
def _get_index(self, key) -> int:
"""
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself.
"""
return get_index(self._model, key)
def _get_name(self, index: int):
"""Get the input name corresponding to the input index."""
return get_name(self._model, index)
@property
def ignored_inputs(self) -> List[str]:
return [self._get_name(index) for index in self._ignored]
def _validate_ignored(self, ignored: list) -> List[int]:
if ignored is None:
return []
else:
return [self._get_index(key) for key in ignored]
def __call__(self, *args, **kwargs):
raise NotImplementedError(
"This bounding box is fixed by the model and does not have "
"adjustable parameters."
)
@abc.abstractmethod
def fix_inputs(self, model, fixed_inputs: dict):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
"""
raise NotImplementedError("This should be implemented by a child class.")
@abc.abstractmethod
def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]:
"""
Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out : bool
Whether all of the inputs are outside the bounding_box
"""
raise NotImplementedError("This has not been implemented for BoundingDomain.")
@staticmethod
def _base_output(input_shape, fill_value):
"""
Create a baseline output, assuming that the entire input is outside
the bounding box.
Parameters
----------
input_shape : tuple
The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
An array of the correct shape containing all fill_value
"""
return np.zeros(input_shape) + fill_value
def _all_out_output(self, input_shape, fill_value):
"""
Create output if all inputs are outside the domain.
Parameters
----------
input_shape : tuple
The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
A full set of outputs for case that all inputs are outside domain.
"""
return [
self._base_output(input_shape, fill_value)
for _ in range(self._model.n_outputs)
], None
def _modify_output(self, valid_output, valid_index, input_shape, fill_value):
"""
For a single output fill in all the parts corresponding to inputs
outside the bounding box.
Parameters
----------
valid_output : numpy array
The output from the model corresponding to inputs inside the
bounding box
valid_index : numpy array
array of all indices of inputs inside the bounding box
input_shape : tuple
The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
An output array with all the indices corresponding to inputs
outside the bounding box filled in by fill_value
"""
output = self._base_output(input_shape, fill_value)
if not output.shape:
output = np.array(valid_output)
else:
output[valid_index] = valid_output
if np.isscalar(valid_output):
output = output.item(0)
return output
def _prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value):
"""
Fill in all the outputs of the model corresponding to inputs
outside the bounding_box.
Parameters
----------
valid_outputs : list of numpy array
The list of outputs from the model corresponding to inputs
inside the bounding box
valid_index : numpy array
array of all indices of inputs inside the bounding box
input_shape : tuple
The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
List of filled in output arrays.
"""
outputs = []
for valid_output in valid_outputs:
outputs.append(
self._modify_output(valid_output, valid_index, input_shape, fill_value)
)
return outputs
def prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value):
"""
Fill in all the outputs of the model corresponding to inputs
outside the bounding_box, adjusting any single output model so that
its output becomes a list containing that output.
Parameters
----------
valid_outputs : list
The list of outputs from the model corresponding to inputs
inside the bounding box
valid_index : array_like
array of all indices of inputs inside the bounding box
input_shape : tuple
The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
"""
if self._model.n_outputs == 1:
valid_outputs = [valid_outputs]
return self._prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
@staticmethod
def _get_valid_outputs_unit(valid_outputs, with_units: bool):
"""
Get the unit for outputs if one is required.
Parameters
----------
valid_outputs : list of numpy array
The list of outputs from the model corresponding to inputs
inside the bounding box
with_units : bool
whether or not a unit is required
"""
if with_units:
return getattr(valid_outputs, "unit", None)
def _evaluate_model(
self,
evaluate: Callable,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units: bool,
):
"""
Evaluate the model using the given evaluate routine.
Parameters
----------
evaluate : Callable
callable which takes in the valid inputs to evaluate model
valid_inputs : list of numpy arrays
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : numpy array
array of all indices inside the bounding box
input_shape : tuple
The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
with_units : bool
whether or not a unit is required
Returns
-------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
"""
valid_outputs = evaluate(valid_inputs)
valid_outputs_unit = self._get_valid_outputs_unit(valid_outputs, with_units)
return (
self.prepare_outputs(valid_outputs, valid_index, input_shape, fill_value),
valid_outputs_unit,
)
def _evaluate(
self, evaluate: Callable, inputs, input_shape, fill_value, with_units: bool
):
"""Evaluate model with steps: prepare_inputs -> evaluate -> prepare_outputs.
Parameters
----------
evaluate : Callable
callable which takes in the valid inputs to evaluate model
inputs : list
List of all the model inputs
input_shape : tuple
The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
with_units : bool
whether or not a unit is required
Returns
-------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
"""
valid_inputs, valid_index, all_out = self.prepare_inputs(input_shape, inputs)
if all_out:
return self._all_out_output(input_shape, fill_value)
else:
return self._evaluate_model(
evaluate, valid_inputs, valid_index, input_shape, fill_value, with_units
)
@staticmethod
def _set_outputs_unit(outputs, valid_outputs_unit):
"""
Set the units on the outputs
prepare_inputs -> evaluate -> prepare_outputs -> set output units.
Parameters
----------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
Returns
-------
List containing filled in output values and units
"""
if valid_outputs_unit is not None:
return Quantity(outputs, valid_outputs_unit, copy=False, subok=True)
return outputs
def evaluate(self, evaluate: Callable, inputs, fill_value):
"""
Perform full model evaluation steps:
prepare_inputs -> evaluate -> prepare_outputs -> set output units.
Parameters
----------
evaluate : callable
callable which takes in the valid inputs to evaluate model
inputs : list
List of all the model inputs
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
"""
input_shape = self._model.input_shape(inputs)
# NOTE: CompoundModel does not currently support units during
# evaluation for bounding_box so this feature is turned off
# for CompoundModel(s).
outputs, valid_outputs_unit = self._evaluate(
evaluate, inputs, input_shape, fill_value, self._model.bbox_with_units
)
return tuple(self._set_outputs_unit(outputs, valid_outputs_unit))
class ModelBoundingBox(_BoundingDomain):
"""
A model's bounding box.
Parameters
----------
intervals : dict
A dictionary containing all the intervals for each model input
keys -> input index
values -> interval for that index
model : `~astropy.modeling.Model`
The Model this bounding_box is for.
ignored : list
A list containing all the inputs (index) which will not be
checked for whether or not their elements are in/out of an interval.
order : optional, str
The ordering that is assumed for the tuple representation of this
bounding_box. Options: 'C': C/Python order, e.g. z, y, x.
(default), 'F': Fortran/mathematical notation order, e.g. x, y, z.
"""
def __init__(
self,
intervals: Dict[int, _Interval],
model,
ignored: List[int] = None,
order: str = "C",
):
super().__init__(model, ignored, order)
self._intervals = {}
if intervals != () and intervals != {}:
self._validate(intervals, order=order)
def copy(self, ignored=None):
intervals = {
index: interval.copy() for index, interval in self._intervals.items()
}
if ignored is None:
ignored = self._ignored.copy()
return ModelBoundingBox(
intervals, self._model, ignored=ignored, order=self._order
)
@property
def intervals(self) -> Dict[int, _Interval]:
"""Return bounding_box labeled using input positions."""
return self._intervals
@property
def named_intervals(self) -> Dict[str, _Interval]:
"""Return bounding_box labeled using input names."""
return {self._get_name(index): bbox for index, bbox in self._intervals.items()}
def __repr__(self):
parts = ["ModelBoundingBox(", " intervals={"]
for name, interval in self.named_intervals.items():
parts.append(f" {name}: {interval}")
parts.append(" }")
if len(self._ignored) > 0:
parts.append(f" ignored={self.ignored_inputs}")
parts.append(
f" model={self._model.__class__.__name__}(inputs={self._model.inputs})"
)
parts.append(f" order='{self._order}'")
parts.append(")")
return "\n".join(parts)
def __len__(self):
return len(self._intervals)
def __contains__(self, key):
try:
index = self._get_index(key)
return index in self._intervals or index in self._ignored
except (IndexError, ValueError):
return False
def has_interval(self, key):
return self._get_index(key) in self._intervals
def __getitem__(self, key):
"""Get bounding_box entries by either input name or input index."""
index = self._get_index(key)
if index in self._ignored:
return _ignored_interval
else:
return self._intervals[self._get_index(key)]
def bounding_box(self, order: str = None):
"""
        Return the old tuple-of-tuples representation of the bounding_box.
            order='C' corresponds to the old bounding_box ordering.
            order='F' corresponds to the gwcs bounding_box ordering.
"""
if len(self._intervals) == 1:
return tuple(list(self._intervals.values())[0])
else:
order = self._get_order(order)
inputs = self._model.inputs
if order == "C":
inputs = inputs[::-1]
bbox = tuple(tuple(self[input_name]) for input_name in inputs)
if len(bbox) == 1:
bbox = bbox[0]
return bbox
def __eq__(self, value):
"""Note equality can be either with old representation or new one."""
if isinstance(value, tuple):
return self.bounding_box() == value
elif isinstance(value, ModelBoundingBox):
return (self.intervals == value.intervals) and (
self.ignored == value.ignored
)
else:
return False
def __setitem__(self, key, value):
"""Validate and store interval under key (input index or input name)."""
index = self._get_index(key)
if index in self._ignored:
self._ignored.remove(index)
self._intervals[index] = _Interval.validate(value)
def __delitem__(self, key):
"""Delete stored interval."""
index = self._get_index(key)
if index in self._ignored:
raise RuntimeError(f"Cannot delete ignored input: {key}!")
del self._intervals[index]
self._ignored.append(index)
def _validate_dict(self, bounding_box: dict):
"""Validate passing dictionary of intervals and setting them."""
for key, value in bounding_box.items():
self[key] = value
@property
def _available_input_index(self):
model_input_index = [self._get_index(_input) for _input in self._model.inputs]
return [_input for _input in model_input_index if _input not in self._ignored]
def _validate_sequence(self, bounding_box, order: str = None):
"""
        Validate a tuple-of-tuples (or similar sequence) representation and set the intervals.
"""
order = self._get_order(order)
if order == "C":
# If bounding_box is C/python ordered, it needs to be reversed
# to be in Fortran/mathematical/input order.
bounding_box = bounding_box[::-1]
for index, value in enumerate(bounding_box):
self[self._available_input_index[index]] = value
@property
def _n_inputs(self) -> int:
n_inputs = self._model.n_inputs - len(self._ignored)
if n_inputs > 0:
return n_inputs
else:
return 0
def _validate_iterable(self, bounding_box, order: str = None):
"""Validate and set any iterable representation."""
if len(bounding_box) != self._n_inputs:
raise ValueError(
f"Found {len(bounding_box)} intervals, "
f"but must have exactly {self._n_inputs}."
)
if isinstance(bounding_box, dict):
self._validate_dict(bounding_box)
else:
self._validate_sequence(bounding_box, order)
def _validate(self, bounding_box, order: str = None):
"""Validate and set any representation."""
if self._n_inputs == 1 and not isinstance(bounding_box, dict):
self[self._available_input_index[0]] = bounding_box
else:
self._validate_iterable(bounding_box, order)
@classmethod
def validate(
cls,
model,
bounding_box,
ignored: list = None,
order: str = "C",
_preserve_ignore: bool = False,
**kwargs,
):
"""
Construct a valid bounding box for a model.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be a bounding_box
bounding_box : dict, tuple
A possible representation of the bounding box
order : optional, str
The order that a tuple representation will be assumed to be
Default: 'C'
"""
if isinstance(bounding_box, ModelBoundingBox):
order = bounding_box.order
if _preserve_ignore:
ignored = bounding_box.ignored
bounding_box = bounding_box.named_intervals
new = cls({}, model, ignored=ignored, order=order)
new._validate(bounding_box)
return new
def fix_inputs(self, model, fixed_inputs: dict, _keep_ignored=False):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
        _keep_ignored : bool
Keep the ignored inputs of the bounding box (internal argument only)
"""
new = self.copy()
for _input in fixed_inputs.keys():
del new[_input]
if _keep_ignored:
ignored = new.ignored
else:
ignored = None
return ModelBoundingBox.validate(
model, new.named_intervals, ignored=ignored, order=new._order
)
@property
def dimension(self):
return len(self)
def domain(self, resolution, order: str = None):
inputs = self._model.inputs
order = self._get_order(order)
if order == "C":
inputs = inputs[::-1]
return [self[input_name].domain(resolution) for input_name in inputs]
def _outside(self, input_shape, inputs):
"""
Get all the input positions which are outside the bounding_box,
so that the corresponding outputs can be filled with the fill
value (default NaN).
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
outside_index : bool-numpy array
True -> position outside bounding_box
False -> position inside bounding_box
all_out : bool
if all of the inputs are outside the bounding_box
"""
all_out = False
outside_index = np.zeros(input_shape, dtype=bool)
for index, _input in enumerate(inputs):
_input = np.asanyarray(_input)
outside = np.broadcast_to(self[index].outside(_input), input_shape)
outside_index[outside] = True
if outside_index.all():
all_out = True
break
return outside_index, all_out
def _valid_index(self, input_shape, inputs):
"""
Get the indices of all the inputs inside the bounding_box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_index : numpy array
array of all indices inside the bounding box
all_out : bool
if all of the inputs are outside the bounding_box
"""
outside_index, all_out = self._outside(input_shape, inputs)
valid_index = np.atleast_1d(np.logical_not(outside_index)).nonzero()
if len(valid_index[0]) == 0:
all_out = True
return valid_index, all_out
def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]:
"""
        Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
valid_index, all_out = self._valid_index(input_shape, inputs)
valid_inputs = []
if not all_out:
for _input in inputs:
if input_shape:
valid_input = np.broadcast_to(np.atleast_1d(_input), input_shape)[
valid_index
]
if np.isscalar(_input):
valid_input = valid_input.item(0)
valid_inputs.append(valid_input)
else:
valid_inputs.append(_input)
return tuple(valid_inputs), valid_index, all_out
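# Illustrative sketch (not part of the module): how a ModelBoundingBox is
# typically built and queried.  The helper name below is hypothetical; the
# model class comes from the public astropy.modeling API.
def _example_model_bounding_box():
    from astropy.modeling import models

    gauss = models.Gaussian2D()
    # With order='C' the outer tuple is (y-interval, x-interval); validate()
    # reverses it into per-input intervals.
    bbox = ModelBoundingBox.validate(gauss, ((-1, 1), (-2, 2)), order="C")
    x_interval = bbox["x"]                      # interval for 'x': (-2, 2)
    y_interval = bbox["y"]                      # interval for 'y': (-1, 1)
    # The old tuple representation can be recovered in either ordering.
    as_fortran = bbox.bounding_box(order="F")   # ((-2, 2), (-1, 1))
    return x_interval, y_interval, as_fortran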
_BaseSelectorArgument = namedtuple("_BaseSelectorArgument", "index ignore")
class _SelectorArgument(_BaseSelectorArgument):
"""
Contains a single CompoundBoundingBox slicing input.
Parameters
----------
index : int
The index of the input in the input list
ignore : bool
Whether or not this input will be ignored by the bounding box.
Methods
-------
validate :
Returns a valid SelectorArgument for a given model.
get_selector :
Returns the value of the input for use in finding the correct
bounding_box.
get_fixed_value :
Gets the slicing value from a fix_inputs set of values.
"""
def __new__(cls, index, ignore):
self = super().__new__(cls, index, ignore)
return self
@classmethod
def validate(cls, model, argument, ignored: bool = True):
"""
Construct a valid selector argument for a CompoundBoundingBox.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be an argument for.
argument : int or str
A representation of which evaluation input to use
ignored : optional, bool
Whether or not to ignore this argument in the ModelBoundingBox.
Returns
-------
Validated selector_argument
"""
return cls(get_index(model, argument), ignored)
def get_selector(self, *inputs):
"""
Get the selector value corresponding to this argument.
Parameters
----------
*inputs :
All the processed model evaluation inputs.
"""
_selector = inputs[self.index]
if isiterable(_selector):
if len(_selector) == 1:
return _selector[0]
else:
return tuple(_selector)
return _selector
def name(self, model) -> str:
"""
Get the name of the input described by this selector argument.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return get_name(model, self.index)
def pretty_repr(self, model):
"""
Get a pretty-print representation of this object.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return f"Argument(name='{self.name(model)}', ignore={self.ignore})"
def get_fixed_value(self, model, values: dict):
"""
        Get the value of the fixed input corresponding to this argument.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
values : dict
Dictionary of fixed inputs.
"""
if self.index in values:
return values[self.index]
else:
if self.name(model) in values:
return values[self.name(model)]
else:
raise RuntimeError(
f"{self.pretty_repr(model)} was not found in {values}"
)
def is_argument(self, model, argument) -> bool:
"""
Determine if passed argument is described by this selector argument.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
argument : int or str
A representation of which evaluation input is being used
"""
return self.index == get_index(model, argument)
def named_tuple(self, model):
"""
Get a tuple representation of this argument using the input
name from the model.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return (self.name(model), self.ignore)
class _SelectorArguments(tuple):
"""
Contains the CompoundBoundingBox slicing description.
Parameters
----------
input_ :
The SelectorArgument values
Methods
-------
validate :
Returns a valid SelectorArguments for its model.
get_selector :
Returns the selector a set of inputs corresponds to.
is_selector :
Determines if a selector is correctly formatted for this CompoundBoundingBox.
get_fixed_value :
Gets the selector from a fix_inputs set of values.
"""
_kept_ignore = None
def __new__(cls, input_: Tuple[_SelectorArgument], kept_ignore: List = None):
self = super().__new__(cls, input_)
if kept_ignore is None:
self._kept_ignore = []
else:
self._kept_ignore = kept_ignore
return self
def pretty_repr(self, model):
"""
Get a pretty-print representation of this object.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
"""
parts = ["SelectorArguments("]
for argument in self:
parts.append(f" {argument.pretty_repr(model)}")
parts.append(")")
return "\n".join(parts)
@property
def ignore(self):
"""Get the list of ignored inputs."""
ignore = [argument.index for argument in self if argument.ignore]
ignore.extend(self._kept_ignore)
return ignore
@property
def kept_ignore(self):
"""The arguments to persist in ignoring."""
return self._kept_ignore
@classmethod
def validate(cls, model, arguments, kept_ignore: List = None):
"""
Construct a valid Selector description for a CompoundBoundingBox.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
arguments :
The individual argument information
kept_ignore :
Arguments to persist as ignored
"""
inputs = []
for argument in arguments:
_input = _SelectorArgument.validate(model, *argument)
if _input.index in [this.index for this in inputs]:
raise ValueError(
f"Input: '{get_name(model, _input.index)}' has been repeated."
)
inputs.append(_input)
if len(inputs) == 0:
raise ValueError("There must be at least one selector argument.")
if isinstance(arguments, _SelectorArguments):
if kept_ignore is None:
kept_ignore = []
kept_ignore.extend(arguments.kept_ignore)
return cls(tuple(inputs), kept_ignore)
def get_selector(self, *inputs):
"""
Get the selector corresponding to these inputs.
Parameters
----------
*inputs :
All the processed model evaluation inputs.
"""
return tuple(argument.get_selector(*inputs) for argument in self)
def is_selector(self, _selector):
"""
Determine if this is a reasonable selector.
Parameters
----------
_selector : tuple
The selector to check
"""
return isinstance(_selector, tuple) and len(_selector) == len(self)
def get_fixed_values(self, model, values: dict):
"""
        Get the values of the fixed inputs corresponding to these arguments.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
values : dict
Dictionary of fixed inputs.
"""
return tuple(argument.get_fixed_value(model, values) for argument in self)
def is_argument(self, model, argument) -> bool:
"""
Determine if passed argument is one of the selector arguments.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which evaluation input is being used
"""
return any(selector_arg.is_argument(model, argument) for selector_arg in self)
def selector_index(self, model, argument):
"""
Get the index of the argument passed in the selector tuples.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
for index, selector_arg in enumerate(self):
if selector_arg.is_argument(model, argument):
return index
else:
raise ValueError(
f"{argument} does not correspond to any selector argument."
)
def reduce(self, model, argument):
"""
Reduce the selector arguments by the argument given.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
arguments = list(self)
kept_ignore = [arguments.pop(self.selector_index(model, argument)).index]
kept_ignore.extend(self._kept_ignore)
return _SelectorArguments.validate(model, tuple(arguments), kept_ignore)
def add_ignore(self, model, argument):
"""
Add argument to the kept_ignore list.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
if self.is_argument(model, argument):
raise ValueError(
f"{argument}: is a selector argument and cannot be ignored."
)
kept_ignore = [get_index(model, argument)]
return _SelectorArguments.validate(model, self, kept_ignore)
def named_tuple(self, model):
"""
Get a tuple of selector argument tuples using input names.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
"""
return tuple(selector_arg.named_tuple(model) for selector_arg in self)
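# Illustrative sketch (not part of the module): how the private selector-argument
# description is validated and used to pick a selector from evaluation inputs.
# The helper name is hypothetical; treat this as documentation of the classes above.
def _example_selector_arguments():
    from astropy.modeling import models

    gauss = models.Gaussian2D()
    # One selector argument: slice on the 'x' input and ignore it inside the
    # per-selector bounding boxes.
    args = _SelectorArguments.validate(gauss, [("x", True)])
    ignored = args.ignore                     # [0], the index of 'x'
    selector = args.get_selector(0.5, 2.0)    # (0.5,), built from the 'x' value
    return ignored, selector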
class CompoundBoundingBox(_BoundingDomain):
"""
A model's compound bounding box.
Parameters
----------
bounding_boxes : dict
A dictionary containing all the ModelBoundingBoxes that are possible
keys -> _selector (extracted from model inputs)
values -> ModelBoundingBox
model : `~astropy.modeling.Model`
The Model this compound bounding_box is for.
selector_args : _SelectorArguments
A description of how to extract the selectors from model inputs.
create_selector : optional
A method which takes in the selector and the model to return a
valid bounding corresponding to that selector. This can be used
to construct new bounding_boxes for previously undefined selectors.
These new boxes are then stored for future lookups.
order : optional, str
The ordering that is assumed for the tuple representation of the
bounding_boxes.
"""
def __init__(
self,
bounding_boxes: Dict[Any, ModelBoundingBox],
model,
selector_args: _SelectorArguments,
create_selector: Callable = None,
ignored: List[int] = None,
order: str = "C",
):
super().__init__(model, ignored, order)
self._create_selector = create_selector
self._selector_args = _SelectorArguments.validate(model, selector_args)
self._bounding_boxes = {}
self._validate(bounding_boxes)
def copy(self):
bounding_boxes = {
selector: bbox.copy(self.selector_args.ignore)
for selector, bbox in self._bounding_boxes.items()
}
return CompoundBoundingBox(
bounding_boxes,
self._model,
selector_args=self._selector_args,
create_selector=copy.deepcopy(self._create_selector),
order=self._order,
)
def __repr__(self):
parts = ["CompoundBoundingBox(", " bounding_boxes={"]
# bounding_boxes
for _selector, bbox in self._bounding_boxes.items():
bbox_repr = bbox.__repr__().split("\n")
parts.append(f" {_selector} = {bbox_repr.pop(0)}")
for part in bbox_repr:
parts.append(f" {part}")
parts.append(" }")
# selector_args
selector_args_repr = self.selector_args.pretty_repr(self._model).split("\n")
parts.append(f" selector_args = {selector_args_repr.pop(0)}")
for part in selector_args_repr:
parts.append(f" {part}")
parts.append(")")
return "\n".join(parts)
@property
def bounding_boxes(self) -> Dict[Any, ModelBoundingBox]:
return self._bounding_boxes
@property
def selector_args(self) -> _SelectorArguments:
return self._selector_args
@selector_args.setter
def selector_args(self, value):
self._selector_args = _SelectorArguments.validate(self._model, value)
warnings.warn(
"Overriding selector_args may cause problems you should re-validate "
"the compound bounding box before use!",
RuntimeWarning,
)
@property
def named_selector_tuple(self) -> tuple:
return self._selector_args.named_tuple(self._model)
@property
def create_selector(self):
return self._create_selector
@staticmethod
def _get_selector_key(key):
if isiterable(key):
return tuple(key)
else:
return (key,)
def __setitem__(self, key, value):
_selector = self._get_selector_key(key)
if not self.selector_args.is_selector(_selector):
raise ValueError(f"{_selector} is not a selector!")
ignored = self.selector_args.ignore + self.ignored
self._bounding_boxes[_selector] = ModelBoundingBox.validate(
self._model, value, ignored, order=self._order
)
def _validate(self, bounding_boxes: dict):
for _selector, bounding_box in bounding_boxes.items():
self[_selector] = bounding_box
def __eq__(self, value):
if isinstance(value, CompoundBoundingBox):
return (
self.bounding_boxes == value.bounding_boxes
and self.selector_args == value.selector_args
and self.create_selector == value.create_selector
)
else:
return False
@classmethod
def validate(
cls,
model,
bounding_box: dict,
selector_args=None,
create_selector=None,
ignored: list = None,
order: str = "C",
_preserve_ignore: bool = False,
**kwarg,
):
"""
Construct a valid compound bounding box for a model.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be a bounding_box
bounding_box : dict
Dictionary of possible bounding_box representations
selector_args : optional
Description of the selector arguments
create_selector : optional, callable
Method for generating new selectors
order : optional, str
The order that a tuple representation will be assumed to be
Default: 'C'
"""
if isinstance(bounding_box, CompoundBoundingBox):
if selector_args is None:
selector_args = bounding_box.selector_args
if create_selector is None:
create_selector = bounding_box.create_selector
order = bounding_box.order
if _preserve_ignore:
ignored = bounding_box.ignored
bounding_box = bounding_box.bounding_boxes
if selector_args is None:
raise ValueError(
"Selector arguments must be provided "
"(can be passed as part of bounding_box argument)"
)
return cls(
bounding_box,
model,
selector_args,
create_selector=create_selector,
ignored=ignored,
order=order,
)
def __contains__(self, key):
return key in self._bounding_boxes
def _create_bounding_box(self, _selector):
self[_selector] = self._create_selector(_selector, model=self._model)
return self[_selector]
def __getitem__(self, key):
_selector = self._get_selector_key(key)
if _selector in self:
return self._bounding_boxes[_selector]
elif self._create_selector is not None:
return self._create_bounding_box(_selector)
else:
raise RuntimeError(f"No bounding box is defined for selector: {_selector}.")
def _select_bounding_box(self, inputs) -> ModelBoundingBox:
_selector = self.selector_args.get_selector(*inputs)
return self[_selector]
def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]:
"""
        Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
bounding_box = self._select_bounding_box(inputs)
return bounding_box.prepare_inputs(input_shape, inputs)
def _matching_bounding_boxes(self, argument, value) -> Dict[Any, ModelBoundingBox]:
selector_index = self.selector_args.selector_index(self._model, argument)
matching = {}
for selector_key, bbox in self._bounding_boxes.items():
if selector_key[selector_index] == value:
new_selector_key = list(selector_key)
new_selector_key.pop(selector_index)
if bbox.has_interval(argument):
new_bbox = bbox.fix_inputs(
self._model, {argument: value}, _keep_ignored=True
)
else:
new_bbox = bbox.copy()
matching[tuple(new_selector_key)] = new_bbox
if len(matching) == 0:
raise ValueError(
f"Attempting to fix input {argument}, but there are no "
f"bounding boxes for argument value {value}."
)
return matching
def _fix_input_selector_arg(self, argument, value):
matching_bounding_boxes = self._matching_bounding_boxes(argument, value)
if len(self.selector_args) == 1:
return matching_bounding_boxes[()]
else:
return CompoundBoundingBox(
matching_bounding_boxes,
self._model,
self.selector_args.reduce(self._model, argument),
)
def _fix_input_bbox_arg(self, argument, value):
bounding_boxes = {}
for selector_key, bbox in self._bounding_boxes.items():
bounding_boxes[selector_key] = bbox.fix_inputs(
self._model, {argument: value}, _keep_ignored=True
)
return CompoundBoundingBox(
bounding_boxes,
self._model,
self.selector_args.add_ignore(self._model, argument),
)
def fix_inputs(self, model, fixed_inputs: dict):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
"""
fixed_input_keys = list(fixed_inputs.keys())
argument = fixed_input_keys.pop()
value = fixed_inputs[argument]
if self.selector_args.is_argument(self._model, argument):
bbox = self._fix_input_selector_arg(argument, value)
else:
bbox = self._fix_input_bbox_arg(argument, value)
if len(fixed_input_keys) > 0:
new_fixed_inputs = fixed_inputs.copy()
del new_fixed_inputs[argument]
bbox = bbox.fix_inputs(model, new_fixed_inputs)
if isinstance(bbox, CompoundBoundingBox):
selector_args = bbox.named_selector_tuple
bbox_dict = bbox
elif isinstance(bbox, ModelBoundingBox):
selector_args = None
bbox_dict = bbox.named_intervals
return bbox.__class__.validate(
model, bbox_dict, order=bbox.order, selector_args=selector_args
)
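# Illustrative sketch (not part of the module): a CompoundBoundingBox keyed on
# one input.  The helper name is hypothetical; the dictionary keys are selector
# tuples and the values are per-selector ModelBoundingBox representations.
def _example_compound_bounding_box():
    from astropy.modeling import models

    gauss = models.Gaussian2D()
    cbbox = CompoundBoundingBox.validate(
        gauss,
        # One bounding box per value of the 'x' selector input.
        {(0,): {"y": (-1, 1)}, (1,): {"y": (-2, 2)}},
        selector_args=[("x", True)],
    )
    # Indexing with a selector key returns the matching ModelBoundingBox.
    return cbbox[(0,)]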
|
9bf87b0d4b5644274ce5248fc3e5198f60679d9c0435c903c7ae25d53da2c5b7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from .core import Fittable1DModel, Fittable2DModel
from .parameters import InputParameterError, Parameter
from .utils import ellipse_extent
__all__ = [
"AiryDisk2D",
"Moffat1D",
"Moffat2D",
"Box1D",
"Box2D",
"Const1D",
"Const2D",
"Ellipse2D",
"Disk2D",
"Gaussian1D",
"Gaussian2D",
"Linear1D",
"Lorentz1D",
"RickerWavelet1D",
"RickerWavelet2D",
"RedshiftScaleFactor",
"Multiply",
"Planar2D",
"Scale",
"Sersic1D",
"Sersic2D",
"Shift",
"Sine1D",
"Cosine1D",
"Tangent1D",
"ArcSine1D",
"ArcCosine1D",
"ArcTangent1D",
"Trapezoid1D",
"TrapezoidDisk2D",
"Ring2D",
"Voigt1D",
"KingProjectedAnalytic1D",
"Exponential1D",
"Logarithmic1D",
]
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian - for a normalized profile
(integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi))
mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian.
stddev : float or `~astropy.units.Quantity`.
Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)).
Notes
-----
Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Gaussian"
)
mean = Parameter(default=0, description="Position of peak (Gaussian)")
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(
default=1,
bounds=(FLOAT_EPSILON, None),
description="Standard deviation of the Gaussian",
)
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(-0.5 * (x - mean) ** 2 / stddev**2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev**2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev**2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev**3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
return {self.inputs[0]: self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"mean": inputs_unit[self.inputs[0]],
"stddev": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
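# Illustrative sketch (not part of the module): evaluating Gaussian1D, its FWHM,
# and resizing the default bounding box as in the ``bounding_box`` docstring
# above.  The helper name is hypothetical.
def _example_gaussian1d():
    import numpy as np

    g = Gaussian1D(amplitude=2.0, mean=1.0, stddev=0.5)
    y = g(np.linspace(-1.0, 3.0, 5))      # peak value 2.0 at x == mean
    fwhm = g.fwhm                         # stddev * 2*sqrt(2*ln 2) ~ 1.18
    # Mirrors the docstring usage: rebuild the default box with a smaller factor.
    g.bounding_box = g.bounding_box(factor=3)
    return y, fwhm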
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise. Must be `None` if a covariance matrix
(``cov_matrix``) is provided. If no ``cov_matrix`` is given,
`None` means the default value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev``
must be provided consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(
\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}
\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(
default=0, description="Peak position (along x axis) of Gaussian"
)
y_mean = Parameter(
default=0, description="Peak position (along y axis) of Gaussian"
)
x_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along x axis)"
)
y_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along y axis)"
)
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a "
"float (in radians) or a "
"|Quantity| angle (optional)"
),
)
def __init__(
self,
amplitude=amplitude.default,
x_mean=x_mean.default,
y_mean=y_mean.default,
x_stddev=None,
y_stddev=None,
theta=None,
cov_matrix=None,
**kwargs,
):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError(
"Cannot specify both cov_matrix and x/y_stddev/theta"
)
            # Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault("bounds", {})
kwargs["bounds"].setdefault("x_stddev", (FLOAT_EPSILON, None))
kwargs["bounds"].setdefault("y_stddev", (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude,
x_mean=x_mean,
y_mean=y_mean,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
**kwargs,
)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``.
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-5.5, upper=5.5)
y: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-2.0, upper=2.0)
y: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
dx, dy = ellipse_extent(a, b, self.theta)
return (
(self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx),
)
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function."""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2.0 * theta)
xstd2 = x_stddev**2
ystd2 = y_stddev**2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(
-((a * xdiff**2) + (b * xdiff * ydiff) + (c * ydiff**2))
)
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters."""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2.0 * theta)
sin2t = np.sin(2.0 * theta)
xstd2 = x_stddev**2
ystd2 = y_stddev**2
xstd3 = x_stddev**3
ystd3 = y_stddev**3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff**2
ydiff2 = ydiff**2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) + (c * ydiff2)))
da_dtheta = sint * cost * ((1.0 / ystd2) - (1.0 / xstd2))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2.0 * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2.0 * c * ydiff))
dg_dx_stddev = g * (
-(
da_dx_stddev * xdiff2
+ db_dx_stddev * xdiff * ydiff
+ dc_dx_stddev * ydiff2
)
)
dg_dy_stddev = g * (
-(
da_dy_stddev * xdiff2
+ db_dy_stddev * xdiff * ydiff
+ dc_dy_stddev * ydiff2
)
)
dg_dtheta = g * (
-(da_dtheta * xdiff2 + db_dtheta * xdiff * ydiff + dc_dtheta * ydiff2)
)
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev, dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
return {self.inputs[0]: self.x_mean.unit, self.inputs[1]: self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_mean": inputs_unit[self.inputs[0]],
"y_mean": inputs_unit[self.inputs[0]],
"x_stddev": inputs_unit[self.inputs[0]],
"y_stddev": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
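# Illustrative sketch (not part of the module): the two equivalent ways of
# parametrizing Gaussian2D described in the Notes above.  The helper name is
# hypothetical.
def _example_gaussian2d():
    import numpy as np

    # Explicit widths and rotation angle ...
    g_explicit = Gaussian2D(
        amplitude=1, x_mean=0, y_mean=0, x_stddev=1.5, y_stddev=0.5, theta=np.pi / 4
    )
    # ... or a 2x2 covariance matrix, from which the widths and angle are derived.
    cov = np.array([[1.25, 1.0], [1.0, 1.25]])
    g_cov = Gaussian2D(amplitude=1, x_mean=0, y_mean=0, cov_matrix=cov)
    return g_explicit(0.5, -0.5), (g_cov.x_stddev.value, g_cov.y_stddev.value)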
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.unit is None:
return None
return {self.inputs[0]: self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function."""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.offset) for x in self.bounding_box
)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function."""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model."""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter."""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"offset": outputs_unit[self.outputs[0]]}
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.unit is None:
return None
return {self.inputs[0]: self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function."""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
factor = Parameter(default=1, description="Factor by which to multiply a model")
linear = True
fittable = True
_has_inverse_bounding_box = True
@property
def inverse(self):
"""One dimensional inverse multiply model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function."""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
"""
z = Parameter(description="Redshift", default=0)
_has_inverse_bounding_box = True
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function."""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative."""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model."""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.z) for x in self.bounding_box.bounding_box()
)
return inv
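# Illustrative sketch (not part of the module): shifting a rest wavelength to the
# observed frame and back with RedshiftScaleFactor.  The helper name is hypothetical.
def _example_redshift():
    rs = RedshiftScaleFactor(z=0.5)
    observed = rs(6563.0)          # 6563 * (1 + 0.5) = 9844.5
    rest = rs.inverse(observed)    # back to ~6563.0
    return observed, rest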
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
return amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)
)
@property
def input_units(self):
if self.r_eff.unit is None:
return None
return {self.inputs[0]: self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"r_eff": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
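# Illustrative sketch (not part of the module): evaluating Sersic1D.  Note that
# the first call imports scipy (for gammaincinv), so scipy must be available.
# The helper name is hypothetical.
def _example_sersic1d():
    import numpy as np

    s = Sersic1D(amplitude=1.0, r_eff=5.0, n=4.0)
    # At r == r_eff the profile equals the amplitude by construction.
    return s(np.array([1.0, 5.0, 25.0]))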
class _Trigonometric1D(Fittable1DModel):
"""
Base class for one dimensional trigonometric and inverse trigonometric models.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
"""
amplitude = Parameter(default=1, description="Oscillation amplitude")
frequency = Parameter(default=1, description="Oscillation frequency")
phase = Parameter(default=0, description="Oscillation phase")
@property
def input_units(self):
if self.frequency.unit is None:
return None
return {self.inputs[0]: 1.0 / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"frequency": inputs_unit[self.inputs[0]] ** -1,
"amplitude": outputs_unit[self.outputs[0]],
}
class Sine1D(_Trigonometric1D):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative."""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (
TWOPI * x * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)
)
d_phase = TWOPI * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Sine."""
return ArcSine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Cosine1D(_Trigonometric1D):
"""
One dimensional Cosine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Cosine1D
plt.figure()
s1 = Cosine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Cosine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.cos(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Cosine model derivative."""
d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase)
d_frequency = -(
TWOPI * x * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)
)
d_phase = -(TWOPI * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Cosine."""
return ArcCosine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Tangent1D(_Trigonometric1D):
"""
One dimensional Tangent model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Sine1D, Cosine1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p)
Note that the tangent function is undefined for inputs of the form
    pi/2 + n*pi for all integers n. Thus the default bounding box
has been restricted to:
.. math:: [(-1/4 - p)/f, (1/4 - p)/f]
which is the smallest interval for the tangent function to be continuous
on.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Tangent1D
plt.figure()
s1 = Tangent1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Tangent model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.tan(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Tangent model derivative."""
sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase)) ** 2
d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase)
d_frequency = TWOPI * x * amplitude * sec
d_phase = TWOPI * amplitude * sec
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Tangent."""
return ArcTangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
bbox = [
(-1 / 4 - self.phase) / self.frequency,
(1 / 4 - self.phase) / self.frequency,
]
if self.frequency.unit is not None:
bbox = bbox / self.frequency.unit
return bbox
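# Illustrative sketch (not part of the module): the default Tangent1D bounding
# box restricts evaluation to the continuous branch described in the Notes
# above; points outside it are filled with NaN when the box is applied.  The
# helper name is hypothetical.
def _example_tangent1d_bbox():
    import numpy as np

    t = Tangent1D(amplitude=1, frequency=0.25, phase=0)
    # Default interval is [(-1/4 - p)/f, (1/4 - p)/f] = (-1.0, 1.0) here.
    return t(np.array([0.5, 3.0]), with_bounding_box=True)   # [1.0, nan]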
class _InverseTrigonometric1D(_Trigonometric1D):
"""
Base class for one dimensional inverse trigonometric models.
"""
@property
def input_units(self):
if self.amplitude.unit is None:
return None
return {self.inputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"frequency": outputs_unit[self.outputs[0]] ** -1,
"amplitude": inputs_unit[self.inputs[0]],
}
class ArcSine1D(_InverseTrigonometric1D):
"""
One dimensional ArcSine model returning values between -pi/2 and pi/2
only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Sine
frequency : float
Oscillation frequency for corresponding Sine
phase : float
Oscillation phase for corresponding Sine
See Also
--------
Sine1D, ArcCosine1D, ArcTangent1D
Notes
-----
Model formula:
.. math:: f(x) = ((arcsin(x / A) / 2pi) - p) / f
The arcsin function being used for this model will only accept inputs
in [-A, A]; otherwise, a runtime warning will be thrown and the result
    will be NaN. To avoid this, the default bounding_box is set to [-A, A];
    it is therefore recommended that this model always be evaluated with the
    ``with_bounding_box=True`` option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcSine1D
plt.figure()
s1 = ArcSine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcSine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_sine = np.arcsin(argument) / TWOPI
return (arc_sine - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcSine model derivative."""
d_amplitude = -x / (
TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcSine."""
return Sine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
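# Illustrative sketch (not part of the module): ArcSine1D as the inverse of
# Sine1D on the principal branch, as described in the Notes above.  The helper
# name is hypothetical.
def _example_arcsine_roundtrip():
    s = Sine1D(amplitude=2.0, frequency=0.25, phase=0.0)
    y = s(0.5)               # 2*sin(pi/4) ~ 1.414
    x_back = s.inverse(y)    # ArcSine1D recovers 0.5
    return y, x_back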
class ArcCosine1D(_InverseTrigonometric1D):
"""
One dimensional ArcCosine returning values between 0 and pi only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Cosine
frequency : float
Oscillation frequency for corresponding Cosine
phase : float
Oscillation phase for corresponding Cosine
See Also
--------
Cosine1D, ArcSine1D, ArcTangent1D
Notes
-----
Model formula:
.. math:: f(x) = ((arccos(x / A) / 2pi) - p) / f
The arccos function being used for this model will only accept inputs
in [-A, A]; otherwise, a runtime warning will be thrown and the result
    will be NaN. To avoid this, the default bounding_box is set to [-A, A];
    it is therefore recommended that this model always be evaluated with the
    ``with_bounding_box=True`` option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcCosine1D
plt.figure()
s1 = ArcCosine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, 0, np.pi])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_cos = np.arccos(argument) / TWOPI
return (arc_cos - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model derivative."""
d_amplitude = x / (
TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcCosine."""
return Cosine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class ArcTangent1D(_InverseTrigonometric1D):
"""
One dimensional ArcTangent model returning values between -pi/2 and
pi/2 only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Tangent
frequency : float
Oscillation frequency for corresponding Tangent
phase : float
Oscillation phase for corresponding Tangent
See Also
--------
Tangent1D, ArcSine1D, ArcCosine1D
Notes
-----
Model formula:
.. math:: f(x) = ((arctan(x / A) / 2pi) - p) / f
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcTangent1D
plt.figure()
s1 = ArcTangent1D(amplitude=1, frequency=.25)
r=np.arange(-10, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-10, 10, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model function."""
        # Note: If frequency and x are quantities, they should normally have
        # inverse units, so that argument ends up being dimensionless. However,
        # np.arctan of a dimensionless quantity does not return a plain
        # ndarray, so we remove the quantity-ness from argument in this case
        # (another option would be to convert the result from radians, but
        # this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
        arc_tan = np.arctan(argument) / TWOPI
        return (arc_tan - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model derivative."""
d_amplitude = -x / (
TWOPI * frequency * amplitude**2 * (1 + (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arctan(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of ArcTangent."""
return Tangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
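    Examples
    --------
    A minimal usage sketch (parameter values are arbitrary):
    .. plot::
        :include-source:
        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Linear1D
        plt.figure()
        line = Linear1D(slope=2, intercept=1)
        r = np.arange(-5, 5, .01)
        plt.plot(r, line(r), lw=2)
        plt.axis([-5, 5, -10, 12])
        plt.show()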
"""
slope = Parameter(default=1, description="Slope of the straight line")
intercept = Parameter(default=0, description="Intercept of the straight line")
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function."""
return slope * x + intercept
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Line model derivative with respect to parameters."""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope**-1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
return {self.inputs[0]: self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit[self.outputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
}
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the plane in X
slope_y : float
Slope of the plane in Y
intercept : float
Z-intercept of the plane
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
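    Examples
    --------
    A minimal usage sketch evaluating the plane on a pixel grid (parameter
    values are arbitrary):
    .. plot::
        :include-source:
        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Planar2D
        y, x = np.mgrid[0:50, 0:50]
        plane = Planar2D(slope_x=0.5, slope_y=1.0, intercept=-10)
        plt.figure()
        plt.imshow(plane(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()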
"""
slope_x = Parameter(default=1, description="Slope of the plane in X")
slope_y = Parameter(default=1, description="Slope of the plane in Y")
intercept = Parameter(default=0, description="Z-intercept of the plane")
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function."""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, *params):
"""Two dimensional Plane model derivative with respect to parameters."""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit["z"],
"slope_x": outputs_unit["z"] / inputs_unit["x"],
"slope_y": outputs_unit["z"] / inputs_unit["y"],
}
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Peak value - for a normalized profile (integrating to 1),
set amplitude = 2 / (np.pi * fwhm)
x_0 : float or `~astropy.units.Quantity`.
Position of the peak
fwhm : float or `~astropy.units.Quantity`.
Full width at half maximum (FWHM)
See Also
--------
Gaussian1D, Box1D, RickerWavelet1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and ``fwhm`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
where :math:`\\gamma` is half of given FWHM.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Peak value")
x_0 = Parameter(default=0, description="Position of the peak")
fwhm = Parameter(default=1, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function."""
return amplitude * ((fwhm / 2.0) ** 2) / ((x - x_0) ** 2 + (fwhm / 2.0) ** 2)
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters."""
d_amplitude = fwhm**2 / (fwhm**2 + (x - x_0) ** 2)
d_x_0 = (
amplitude * d_amplitude * (2 * x - 2 * x_0) / (fwhm**2 + (x - x_0) ** 2)
)
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float or `~astropy.units.Quantity`
Position of the peak
amplitude_L : float or `~astropy.units.Quantity`.
The Lorentzian amplitude (peak of the associated Lorentz function)
- for a normalized profile (integrating to 1), set
amplitude_L = 2 / (np.pi * fwhm_L)
fwhm_L : float or `~astropy.units.Quantity`
The Lorentzian full width at half maximum
fwhm_G : float or `~astropy.units.Quantity`.
The Gaussian full width at half maximum
    method : str, optional
        Algorithm for computing the complex error function; one of
        'Humlicek2' (default; fast, with accuracy generally better than
        ``rtol=3.e-5``) or 'Scipy' (alias 'wofz'; requires ``scipy``, almost
        as fast and serves as the accuracy reference).
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and the ``fwhm_*`` must be provided
consistently with compatible units or as unitless numbers.
    The Voigt function is calculated as the real part of the complex error function, computed
    either from Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following
    Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module), or with
    `~scipy.special.wofz` (implementing 'Faddeeva.cc').
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0, description="Position of the peak")
amplitude_L = Parameter(default=1, description="The Lorentzian amplitude")
fwhm_L = Parameter(
default=2 / np.pi, description="The Lorentzian full width at half maximum"
)
fwhm_G = Parameter(
default=np.log(2), description="The Gaussian full width at half maximum"
)
sqrt_pi = np.sqrt(np.pi)
sqrt_ln2 = np.sqrt(np.log(2))
sqrt_ln2pi = np.sqrt(np.log(2) * np.pi)
_last_z = np.zeros(1, dtype=complex)
_last_w = np.zeros(1, dtype=float)
_faddeeva = None
def __init__(
self,
x_0=x_0.default,
amplitude_L=amplitude_L.default,
fwhm_L=fwhm_L.default,
fwhm_G=fwhm_G.default,
method="humlicek2",
**kwargs,
):
if str(method).lower() in ("wofz", "scipy"):
from scipy.special import wofz
self._faddeeva = wofz
elif str(method).lower() == "humlicek2":
self._faddeeva = self._hum2zpf16c
else:
raise ValueError(
f"Not a valid method for Voigt1D Faddeeva function: {method}."
)
self.method = self._faddeeva.__name__
super().__init__(
x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs
)
def _wrap_wofz(self, z):
"""Call complex error (Faddeeva) function w(z) implemented by algorithm `method`;
cache results for consecutive calls from `evaluate`, `fit_deriv`.
"""
if z.shape == self._last_z.shape and np.allclose(
z, self._last_z, rtol=1.0e-14, atol=1.0e-15
):
return self._last_w
self._last_w = self._faddeeva(z)
self._last_z = z
return self._last_w
def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G):
"""One dimensional Voigt function scaled to Lorentz peak amplitude."""
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G
# The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ;
# for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L
return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L
def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G):
"""
Derivative of the one dimensional Voigt function with respect to parameters.
"""
s = self.sqrt_ln2 / fwhm_G
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s
# V * constant from McLean implementation (== their Voigt function)
w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi
# Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L)
dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L
return [
-dwdz.real * 2 * s,
w.real / amplitude_L,
w.real / fwhm_L - dwdz.imag * s,
(-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G,
]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm_L": inputs_unit[self.inputs[0]],
"fwhm_G": inputs_unit[self.inputs[0]],
"amplitude_L": outputs_unit[self.outputs[0]],
}
@staticmethod
def _hum2zpf16c(z, s=10.0):
"""Complex error function w(z = x + iy) combining Humlicek's rational approximations.
|x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II;
else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35
Version using a mask and np.place;
single complex argument version of Franz Schreier's cpfX.hum2zpf16m.
Originally licensed under a 3-clause BSD style license - see
https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py
"""
# Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35
# fmt: off
AA = np.array(
[
+46236.3358828121, -147726.58393079657j,
-206562.80451354137, 281369.1590631087j,
+183092.74968253175, -184787.96830696272j,
-66155.39578477248, 57778.05827983565j,
+11682.770904216826, -9442.402767960672j,
-1052.8438624933142, 814.0996198624186j,
+45.94499030751872, -34.59751573708725j,
-0.7616559377907136, 0.5641895835476449j,
]
) # 1j/sqrt(pi) to the 12. digit
bb = np.array(
[
+7918.06640624997,
-126689.0625,
+295607.8125,
-236486.25,
+84459.375,
-15015.0,
+1365.0,
-60.0,
+1.0,
]
)
# fmt: on
sqrt_piinv = 1.0 / np.sqrt(np.pi)
zz = z * z
w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz * (zz - 3.0))
if np.any(z.imag < s):
mask = abs(z.real) + z.imag < s # returns true for interior points
# returns small complex array covering only the interior region
Z = z[np.where(mask)] + 1.35j
ZZ = Z * Z
# fmt: off
# Recursive algorithms for the polynomials in Z with coefficients AA, bb
# numer = 0.0
# for A in AA[::-1]:
# numer = numer * Z + A
# Explicitly unrolled above loop for speed
numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z +
AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z +
AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0])
# denom = 0.0
# for b in bb[::-1]:
# denom = denom * ZZ + b
# Explicitly unrolled above loop for speed
denom = (((((((ZZ + bb[7])*ZZ + bb[6])*ZZ + bb[5])*ZZ+bb[4])*ZZ + bb[3])*ZZ +
bb[2])*ZZ + bb[1])*ZZ + bb[0]
# fmt: on
np.place(w, mask, numer / denom)
return w
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters."""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
        e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, angle=theta.degree,
                              edgecolor='red', facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the ellipse", mag=True)
x_0 = Parameter(default=0, description="X position of the center of the disk.")
y_0 = Parameter(default=0, description="Y position of the center of the disk.")
a = Parameter(default=1, description="The length of the semimajor axis")
b = Parameter(default=1, description="The length of the semiminor axis")
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a float (in radians) or a |Quantity| angle"
),
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = ((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.0
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"a": inputs_unit[self.inputs[0]],
"b": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
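    Examples
    --------
    A minimal usage sketch (parameter values are arbitrary):
    .. plot::
        :include-source:
        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Disk2D
        y, x = np.mgrid[0:50, 0:50]
        disk = Disk2D(amplitude=10, x_0=25, y_0=25, R_0=10)
        plt.figure()
        plt.imshow(disk(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()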
"""
amplitude = Parameter(default=1, description="Value of disk function", mag=True)
x_0 = Parameter(default=0, description="X position of center of the disk")
y_0 = Parameter(default=0, description="Y position of center of the disk")
R_0 = Parameter(default=1, description="Radius of the disk")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0**2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return (
(self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0),
)
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"R_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
"""
amplitude = Parameter(default=1, description="Value of the disk function", mag=True)
x_0 = Parameter(default=0, description="X position of center of disc")
y_0 = Parameter(default=0, description="Y position of center of disc")
r_in = Parameter(default=1, description="Inner radius of the ring")
width = Parameter(default=1, description="Width of the ring")
def __init__(
self,
amplitude=amplitude.default,
x_0=x_0.default,
y_0=y_0.default,
r_in=None,
width=None,
r_out=None,
**kwargs,
):
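        # Resolve the ring geometry from whichever combination of r_in, width
        # and r_out was supplied: r_out = r_in + width, missing values fall
        # back to the parameter defaults, and inconsistent or negative values
        # raise InputParameterError below.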
if (r_in is None) and (r_out is None) and (width is None):
r_in = self.r_in.default
width = self.width.default
elif (r_in is not None) and (r_out is None) and (width is None):
width = self.width.default
elif (r_in is None) and (r_out is not None) and (width is None):
r_in = self.r_in.default
width = r_out - r_in
elif (r_in is None) and (r_out is None) and (width is not None):
r_in = self.r_in.default
elif (r_in is not None) and (r_out is not None) and (width is None):
width = r_out - r_in
elif (r_in is None) and (r_out is not None) and (width is not None):
r_in = r_out - width
elif (r_in is not None) and (r_out is not None) and (width is not None):
if np.any(width != (r_out - r_in)):
raise InputParameterError("Width must be r_out - r_in")
if np.any(r_in < 0) or np.any(width < 0):
raise InputParameterError(f"{r_in=} and {width=} must both be >=0")
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, **kwargs
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in**2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_in": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude A", mag=True)
x_0 = Parameter(default=0, description="Position of center of box function")
width = Parameter(default=1, description="Width of the box")
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function."""
inside = np.logical_and(x >= x_0 - width / 2.0, x <= x_0 + width / 2.0)
return np.select([inside], [amplitude], 0)
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
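    Examples
    --------
    A minimal usage sketch (parameter values are arbitrary):
    .. plot::
        :include-source:
        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Box2D
        y, x = np.mgrid[0:50, 0:50]
        box = Box2D(amplitude=3, x_0=20, y_0=30, x_width=10, y_width=20)
        plt.figure()
        plt.imshow(box(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()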
"""
amplitude = Parameter(default=1, description="Amplitude", mag=True)
x_0 = Parameter(
default=0, description="X position of the center of the box function"
)
y_0 = Parameter(
default=0, description="Y position of the center of the box function"
)
x_width = Parameter(default=1, description="Width in x direction of the box")
y_width = Parameter(default=1, description="Width in y direction of the box")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function."""
x_range = np.logical_and(x >= x_0 - x_width / 2.0, x <= x_0 + x_width / 2.0)
y_range = np.logical_and(y >= y_0 - y_width / 2.0, y <= y_0 + y_width / 2.0)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[1]],
"x_width": inputs_unit[self.inputs[0]],
"y_width": inputs_unit[self.inputs[1]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="Center position of the trapezoid")
width = Parameter(default=1, description="Width of constant part of the trapezoid")
slope = Parameter(default=1, description="Slope of the tails of trapezoid")
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function."""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.0
x3 = x_0 + width / 2.0
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="X position of the center of the trapezoid")
y_0 = Parameter(default=0, description="Y position of the center of the trapezoid")
R_0 = Parameter(default=1, description="Radius of constant part of trapezoid")
slope = Parameter(
default=1, description="Slope of tails of trapezoid in x direction"
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function."""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit["x"] != inputs_unit["y"]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"R_0": inputs_unit[self.inputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class RickerWavelet1D(Fittable1DModel):
"""
One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import RickerWavelet1D
plt.figure()
s1 = RickerWavelet1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="Position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Ricker Wavelet model function."""
xx_ww = (x - x_0) ** 2 / (2 * sigma**2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class RickerWavelet2D(Fittable2DModel):
"""
Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
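    Examples
    --------
    A minimal usage sketch (parameter values are arbitrary):
    .. plot::
        :include-source:
        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import RickerWavelet2D
        y, x = np.mgrid[0:50, 0:50]
        mod = RickerWavelet2D(amplitude=1, x_0=25, y_0=25, sigma=5)
        plt.figure()
        plt.imshow(mod(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()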
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Ricker Wavelet model function."""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma**2)
return amplitude * (1 - rr_ww) * np.exp(-rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[
\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}
\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
    is the input ``radius`` parameter, and :math:`R_z =
    1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
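    Examples
    --------
    A minimal usage sketch (parameter values are arbitrary; the logarithmic
    stretch makes the faint rings visible):
    .. plot::
        :include-source:
        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import AiryDisk2D
        y, x = np.mgrid[0:100, 0:100]
        mod = AiryDisk2D(amplitude=1, x_0=50, y_0=50, radius=10)
        plt.figure()
        plt.imshow(np.log10(mod(x, y) + 1e-6), origin='lower',
                   interpolation='nearest')
        plt.colorbar()
        plt.show()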
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Airy function"
)
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
radius = Parameter(
default=1,
description="The radius of the Airy disk (radius of first zero crossing)",
)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function."""
if cls._rz is None:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False, subok=True)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"radius": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the model")
x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
gamma = Parameter(default=1, description="Core width of Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function."""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters."""
fac = 1 + (x - x_0) ** 2 / gamma**2
d_A = fac ** (-alpha)
d_x_0 = 2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma**2)
d_gamma = 2 * amplitude * alpha * (x - x_0) ** 2 * d_A / (fac * gamma**3)
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
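    Examples
    --------
    A minimal usage sketch (parameter values are arbitrary):
    .. plot::
        :include-source:
        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Moffat2D
        y, x = np.mgrid[0:50, 0:50]
        mod = Moffat2D(amplitude=1, x_0=25, y_0=25, gamma=5, alpha=2)
        plt.figure()
        plt.imshow(mod(x, y), origin='lower', interpolation='nearest')
        plt.colorbar()
        plt.show()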
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the model")
x_0 = Parameter(
default=0, description="X position of the maximum of the Moffat model"
)
y_0 = Parameter(
default=0, description="Y position of the maximum of the Moffat model"
)
gamma = Parameter(default=1, description="Core width of the Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function."""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters."""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = 2 * amplitude * alpha * d_A * (x - x_0) / (gamma**2 * (1 + rr_gg))
d_y_0 = 2 * amplitude * alpha * d_A * (y - y_0) / (gamma**2 * (1 + rr_gg))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = 2 * amplitude * alpha * d_A * rr_gg / (gamma * (1 + rr_gg))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{
-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]
\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (2n,b_n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
        cbar.set_ticks([-1, 0, 1, 2])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
x_0 = Parameter(default=0, description="X position of the center")
y_0 = Parameter(default=0, description="Y position of the center")
ellip = Parameter(default=0, description="Ellipticity")
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a float (in radians) or a |Quantity| angle"
),
)
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
bn = cls._gammaincinv(2.0 * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_eff": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class KingProjectedAnalytic1D(Fittable1DModel):
"""
Projected (surface density) analytic King Model.
Parameters
----------
amplitude : float
Amplitude or scaling factor.
r_core : float
Core radius (f(r_c) ~ 0.5 f_0)
r_tide : float
Tidal radius.
Notes
-----
This model approximates a King model with an analytic function. The derivation of this
equation can be found in King '62 (equation 14). This is just an approximation of the
full model and the parameters derived from this model should be taken with caution.
    It usually works for models with a concentration parameter (c = log10(r_t/r_c)) smaller than 2.
Model formula:
.. math::
f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} -
\\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import KingProjectedAnalytic1D
import matplotlib.pyplot as plt
plt.figure()
rt_list = [1, 2, 5, 10, 20]
for rt in rt_list:
r = np.linspace(0.1, rt, 100)
mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt)
sig = mod(r)
plt.loglog(r, sig/sig[0], label=f"c ~ {mod.concentration:0.2f}")
plt.xlabel("r")
plt.ylabel(r"$\\sigma/\\sigma_0$")
plt.legend()
plt.show()
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K
"""
amplitude = Parameter(
default=1,
bounds=(FLOAT_EPSILON, None),
description="Amplitude or scaling factor",
)
r_core = Parameter(
default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius"
)
r_tide = Parameter(
default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius"
)
@property
def concentration(self):
"""Concentration parameter of the king model."""
return np.log10(np.abs(self.r_tide / self.r_core))
@staticmethod
def evaluate(x, amplitude, r_core, r_tide):
"""
Analytic King model function.
"""
result = (
amplitude
* r_core**2
* (
1 / np.sqrt(x**2 + r_core**2)
- 1 / np.sqrt(r_tide**2 + r_core**2)
)
** 2
)
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
result[bounds] = result[bounds] * 0.0
return result
@staticmethod
def fit_deriv(x, amplitude, r_core, r_tide):
"""
Analytic King model function derivatives.
"""
d_amplitude = (
r_core**2
* (
1 / np.sqrt(x**2 + r_core**2)
- 1 / np.sqrt(r_tide**2 + r_core**2)
)
** 2
)
d_r_core = (
2
* amplitude
* r_core**2
* (
r_core / (r_core**2 + r_tide**2) ** (3 / 2)
- r_core / (r_core**2 + x**2) ** (3 / 2)
)
* (
1.0 / np.sqrt(r_core**2 + x**2)
- 1.0 / np.sqrt(r_core**2 + r_tide**2)
)
+ 2
* amplitude
* r_core
* (
1.0 / np.sqrt(r_core**2 + x**2)
- 1.0 / np.sqrt(r_core**2 + r_tide**2)
)
** 2
)
d_r_tide = (
2
* amplitude
* r_core**2
* r_tide
* (
1.0 / np.sqrt(r_core**2 + x**2)
- 1.0 / np.sqrt(r_core**2 + r_tide**2)
)
) / (r_core**2 + r_tide**2) ** (3 / 2)
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
d_amplitude[bounds] = d_amplitude[bounds] * 0
d_r_core[bounds] = d_r_core[bounds] * 0
d_r_tide[bounds] = d_r_tide[bounds] * 0
return [d_amplitude, d_r_core, d_r_tide]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
The model is not defined for r > r_tide.
``(r_low, r_high)``
"""
return (0 * self.r_tide, 1 * self.r_tide)
@property
def input_units(self):
if self.r_core.unit is None:
return None
return {self.inputs[0]: self.r_core.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"r_core": inputs_unit[self.inputs[0]],
"r_tide": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Logarithmic1D(Fittable1DModel):
"""
One dimensional logarithmic model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Exponential1D, Gaussian1D
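    Examples
    --------
    A minimal usage sketch (parameter values are arbitrary; the logarithm is
    only real-valued for x / tau > 0):
    .. plot::
        :include-source:
        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Logarithmic1D
        plt.figure()
        mod = Logarithmic1D(amplitude=2, tau=1)
        r = np.arange(0.1, 10, .01)
        plt.plot(r, mod(r), lw=2)
        plt.show()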
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.log(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
d_amplitude = np.log(x / tau)
d_tau = np.zeros(x.shape) - (amplitude / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Exponential1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"tau": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Exponential1D(Fittable1DModel):
"""
One dimensional exponential model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Logarithmic1D, Gaussian1D
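    Examples
    --------
    A minimal usage sketch (parameter values are arbitrary; a negative ``tau``
    gives a decaying exponential, and ``inverse`` returns the corresponding
    `Logarithmic1D`):
    .. plot::
        :include-source:
        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Exponential1D
        plt.figure()
        mod = Exponential1D(amplitude=1, tau=-2)
        r = np.arange(0, 10, .01)
        plt.plot(r, mod(r), lw=2)
        plt.show()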
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.exp(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
"""Derivative with respect to parameters."""
d_amplitude = np.exp(x / tau)
d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Logarithmic1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
"""tau cannot be 0."""
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"tau": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
42c567ad3ae677aca3041015f65261f14bf23ed3ef2cabc92b7fbeb01f8dab9f
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
Implements projections--particularly sky projections defined in WCS Paper II
[1]_.
All angles are set and displayed in degrees but internally the computations are
performed in radians. All functions expect inputs and outputs in degrees.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import abc
from itertools import chain, product
import numpy as np
from astropy import units as u
from astropy import wcs
from .core import Model
from .parameters import InputParameterError, Parameter
from .utils import _to_orig_unit, _to_radian
# List of tuples of the form
# (long class name without suffix, short WCSLIB projection code):
_PROJ_NAME_CODE = [
("ZenithalPerspective", "AZP"),
("SlantZenithalPerspective", "SZP"),
("Gnomonic", "TAN"),
("Stereographic", "STG"),
("SlantOrthographic", "SIN"),
("ZenithalEquidistant", "ARC"),
("ZenithalEqualArea", "ZEA"),
("Airy", "AIR"),
("CylindricalPerspective", "CYP"),
("CylindricalEqualArea", "CEA"),
("PlateCarree", "CAR"),
("Mercator", "MER"),
("SansonFlamsteed", "SFL"),
("Parabolic", "PAR"),
("Molleweide", "MOL"),
("HammerAitoff", "AIT"),
("ConicPerspective", "COP"),
("ConicEqualArea", "COE"),
("ConicEquidistant", "COD"),
("ConicOrthomorphic", "COO"),
("BonneEqualArea", "BON"),
("Polyconic", "PCO"),
("TangentialSphericalCube", "TSC"),
("COBEQuadSphericalCube", "CSC"),
("QuadSphericalCube", "QSC"),
("HEALPix", "HPX"),
("HEALPixPolar", "XPH"),
]
_NOT_SUPPORTED_PROJ_CODES = ["ZPN"]
_PROJ_NAME_CODE_MAP = dict(_PROJ_NAME_CODE)
projcodes = [code for _, code in _PROJ_NAME_CODE]
__all__ = [
"Projection",
"Pix2SkyProjection",
"Sky2PixProjection",
"Zenithal",
"Cylindrical",
"PseudoCylindrical",
"Conic",
"PseudoConic",
"QuadCube",
"HEALPix",
"AffineTransformation2D",
"projcodes",
] + list(map("_".join, product(["Pix2Sky", "Sky2Pix"], chain(*_PROJ_NAME_CODE))))
class _ParameterDS(Parameter):
"""
Same as `Parameter` but can indicate its modified status via the ``dirty``
property. This flag also gets set automatically when a parameter is
modified.
    This ability to track a parameter's modified status is needed so that
    WCSLIB's prjprm structure (whose update may be a more time-intensive
    operation) is updated automatically *only as required*.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dirty = True
def validate(self, value):
super().validate(value)
self.dirty = True
class Projection(Model):
"""Base class for all sky projections."""
# Radius of the generating sphere.
# This sets the circumference to 360 deg so that arc length is measured in deg.
r0 = 180 * u.deg / np.pi
_separable = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj = wcs.Prjprm()
@property
@abc.abstractmethod
def inverse(self):
"""
Inverse projection--all projection models must provide an inverse.
"""
@property
def prjprm(self):
"""WCSLIB ``prjprm`` structure."""
self._update_prj()
return self._prj
def _update_prj(self):
"""
A default updater for projection's pv.
.. warning::
This method assumes that PV0 is never modified. If a projection
that uses PV0 is ever implemented in this module, that projection
class should override this method.
.. warning::
This method assumes that the order in which PVi values (i>0)
are to be assigned is identical to the order of model parameters
in ``param_names``. That is, pv[1] = model.parameters[0], ...
"""
if not self.param_names:
return
pv = []
dirty = False
for p in self.param_names:
param = getattr(self, p)
pv.append(float(param.value))
dirty |= param.dirty
param.dirty = False
if dirty:
self._prj.pv = None, *pv
self._prj.set()
class Pix2SkyProjection(Projection):
"""Base class for all Pix2Sky projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split("_")[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# with no parameters:
self._prj.set()
self.inputs = ("x", "y")
self.outputs = ("phi", "theta")
@property
def input_units(self):
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def evaluate(self, x, y, *args, **kwargs):
self._update_prj()
return self._prj.prjx2s(x, y)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
class Sky2PixProjection(Projection):
"""Base class for all Sky2Pix projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split("_")[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# without parameters:
self._prj.set()
self.inputs = ("phi", "theta")
self.outputs = ("x", "y")
@property
def input_units(self):
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def evaluate(self, phi, theta, *args, **kwargs):
self._update_prj()
return self._prj.prjs2x(phi, theta)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
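# Usage sketch (illustrative only): every Pix2Sky model and its matching
# Sky2Pix model are linked through ``inverse``, so a round trip through the
# native spherical coordinates (phi, theta) recovers the original inputs:
#
#     >>> tan = Pix2Sky_Gnomonic()        # defined further below
#     >>> phi, theta = tan(0.5, 0.5)      # projection plane (deg) -> sky (deg)
#     >>> x, y = tan.inverse(phi, theta)  # approximately (0.5, 0.5)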
class Zenithal(Projection):
r"""Base class for all Zenithal projections.
Zenithal (or azimuthal) projections map the sphere directly onto a
plane. All zenithal projections are specified by defining the
radius as a function of native latitude, :math:`R_\theta`.
The pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg(-y, x) \\
R_\theta &= \sqrt{x^2 + y^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin \phi \\
y &= -R_\theta \cos \phi
"""
class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Zenithal perspective projection - pixel to sky.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
\phi &= \arg(-y \cos \gamma, x) \\
\theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right.
where:
.. math::
\psi &= \arg(\rho, 1) \\
\omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\
\rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\
R &= \sqrt{x^2 + y^2 \cos^2 \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Look angle γ in degrees (Default = 0°)",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
x &= R \sin \phi \\
y &= -R \sec \gamma \cos \theta
where:
.. math::
R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta}
{(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Look angle γ in degrees (Default=0°)",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
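# Sketch (illustrative): the AZP pair takes ``mu`` and ``gamma``; the
# validators above reject the singular case mu = -1:
#
#     >>> azp = Pix2Sky_ZenithalPerspective(mu=2.0, gamma=30.0)
#     >>> inv = azp.inverse                     # a Sky2Pix_ZenithalPerspective
#     >>> Pix2Sky_ZenithalPerspective(mu=-1.0)  # raises InputParameterError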
class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Slant zenithal perspective projection - pixel to sky.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees (Default=0°)",
)
theta0 = _ParameterDS(
default=90.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees (Default=0°)",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Slant zenithal perspective projection - sky to pixel.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees",
)
theta0 = _ParameterDS(
default=90.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal):
r"""
Gnomonic projection - pixel to sky.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
"""
class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
r"""
Gnomonic Projection - sky to pixel.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
"""
class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
r"""
Stereographic Projection - pixel to sky.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
"""
class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
r"""
Stereographic Projection - sky to pixel.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
"""
class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
r"""
Slant orthographic projection - pixel to sky.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
\theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)
The parameters :math:`\xi` and :math:`\eta` are defined from the
reference point :math:`(\phi_c, \theta_c)` as:
.. math::
\xi &= \cot \theta_c \sin \phi_c \\
\eta &= - \cot \theta_c \cos \phi_c
Parameters
----------
xi : float
Obliqueness parameter, ξ. Default is 0.0.
eta : float
Obliqueness parameter, η. Default is 0.0.
"""
xi = _ParameterDS(default=0.0, description="Obliqueness parameter")
eta = _ParameterDS(default=0.0, description="Obliqueness parameter")
class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
r"""
Slant orthographic projection - sky to pixel.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cos \theta
The full formulae, including the obliqueness terms, are:
.. math::
x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
"""
xi = _ParameterDS(default=0.0)
eta = _ParameterDS(default=0.0)
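# Sketch (illustrative): the SIN pair defaults to the plain orthographic case
# (xi = eta = 0); slant terms are supplied as parameters:
#
#     >>> sin_proj = Pix2Sky_SlantOrthographic(xi=0.1, eta=-0.05)
#     >>> sin_proj.inverse    # matching Sky2Pix_SlantOrthographic(0.1, -0.05)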
class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
r"""
Zenithal equidistant projection - pixel to sky.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - R_\theta
"""
class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
r"""
Zenithal equidistant projection - sky to pixel.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = 90^\circ - \theta
"""
class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
r"""
Zenithal equal area projection - pixel to sky.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
"""
class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
r"""
Zenithal equal area projection - sky to pixel.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
&= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
"""
class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
r"""
Airy projection - pixel to sky.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(default=90.0)
class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
r"""
Airy - sky to pixel.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} +
\frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)
where:
.. math::
\xi &= \frac{90^\circ - \theta}{2} \\
\xi_b &= \frac{90^\circ - \theta_b}{2}
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(
default=90.0,
description="The latitude at which to minimize the error,in degrees",
)
class Cylindrical(Projection):
r"""Base class for Cylindrical projections.
Cylindrical projections are so-named because the surface of
projection is a cylinder.
"""
_separable = True
class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical perspective - pixel to sky.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\lambda} \\
\theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)
where:
.. math::
\eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(default=1.0)
lam = _ParameterDS(default=1.0)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError("CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError("CYP projection is not defined for lambda = -mu")
class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
r"""
Cylindrical Perspective - sky to pixel.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
x &= \lambda \phi \\
y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(
default=1.0, description="Distance from center of sphere in spherical radii"
)
lam = _ParameterDS(
default=1.0, description="Radius of the cylinder in spherical radii"
)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError("CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError("CYP projection is not defined for lambda = -mu")
class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical equal area projection - pixel to sky.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)
Parameters
----------
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical):
r"""
Cylindrical equal area projection - sky to pixel.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda}
Parameters
----------
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical):
r"""
Plate carrée projection - pixel to sky.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= y
"""
@staticmethod
def evaluate(x, y):
# The intermediate variables are only used here for clarity
phi = np.array(x)
theta = np.array(y)
return phi, theta
class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical):
r"""
Plate carrée projection - sky to pixel.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \theta
"""
@staticmethod
def evaluate(phi, theta):
# The intermediate variables are only used here for clarity
x = np.array(phi)
y = np.array(theta)
return x, y
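# Sketch (illustrative): CAR is the identity between the projection plane and
# native spherical coordinates, so the pair simply passes values through:
#
#     >>> phi, theta = Pix2Sky_PlateCarree()(10.0, -20.0)  # phi == 10, theta == -20
#     >>> Sky2Pix_PlateCarree()(phi, theta)                # back to (10, -20)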
class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical):
r"""
Mercator - pixel to sky.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ}
"""
class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical):
r"""
Mercator - sky to pixel.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right)
"""
class PseudoCylindrical(Projection):
r"""Base class for pseudocylindrical projections.
Pseudocylindrical projections are like cylindrical projections
except the parallels of latitude are projected at diminishing
lengths toward the polar regions in order to reduce lateral
distortion there. Consequently, the meridians are curved.
"""
_separable = True
class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - pixel to sky.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\cos y} \\
\theta &= y
"""
class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - sky to pixel.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
x &= \phi \cos \theta \\
y &= \theta
"""
class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical):
r"""
Parabolic projection - pixel to sky.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
\phi &= \frac{180^\circ}{\pi} \frac{x}{1 - 4(y / 180^\circ)^2} \\
\theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right)
"""
class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical):
r"""
Parabolic projection - sky to pixel.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\
y &= 180^\circ \sin \frac{\theta}{3}
"""
class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical):
r"""
Molleweide's projection - pixel to sky.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\
\theta &= \sin^{-1}\left(
\frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right)
+ \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}
\right)
"""
class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical):
r"""
Molleweide's projection - sky to pixel.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\
y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma
where :math:`\gamma` is defined as the solution of the
transcendental equation:
.. math::
\sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi}
"""
class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - pixel to sky.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
\phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right)
"""
class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - sky to pixel.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\
y &= \gamma \sin \theta
where:
.. math::
\gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}}
"""
class Conic(Projection):
r"""Base class for conic projections.
In conic projections, the sphere is thought to be projected onto
the surface of a cone which is then opened out.
In a general sense, the pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\
R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin (C \phi) \\
y &= -R_\theta \cos (C \phi) + Y_0
where :math:`C` is the "constant of the cone":
.. math::
C = \frac{180^\circ \cos \theta}{\pi R_\theta}
"""
sigma = _ParameterDS(default=90.0, getter=_to_orig_unit, setter=_to_radian)
delta = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic):
r"""
Colles' conic perspective projection - pixel to sky.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic):
r"""
Colles' conic perspective projection - sky to pixel.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic):
r"""
Albers' conic equal area projection - pixel to sky.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic):
r"""
Albers' conic equal area projection - sky to pixel.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic):
r"""
Conic equidistant projection - pixel to sky.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic):
r"""
Conic equidistant projection - sky to pixel.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic):
r"""
Conic orthomorphic projection - pixel to sky.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic):
r"""
Conic orthomorphic projection - sky to pixel.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class PseudoConic(Projection):
r"""Base class for pseudoconic projections.
Pseudoconics are a subclass of conics with concentric parallels.
"""
class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - pixel to sky.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\
\theta &= Y_0 - R_\theta
where:
.. math::
R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\
A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right)
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
_separable = True
theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - sky to pixel.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
x &= R_\theta \sin A_\phi \\
y &= -R_\theta \cos A_\phi + Y_0
where:
.. math::
A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\
R_\theta &= Y_0 - \theta \\
Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
_separable = True
theta1 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Bonne conformal latitude, in degrees",
)
class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic):
r"""
Polyconic projection - pixel to sky.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic):
r"""
Polyconic projection - sky to pixel.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
class QuadCube(Projection):
r"""Base class for quad cube projections.
Quadrilateralized spherical cube (quad-cube) projections belong to
the class of polyhedral projections in which the sphere is
projected onto the surface of an enclosing polyhedron.
The six faces of the quad-cube projections are numbered and laid
out as::
0
4 3 2 1 4 3 2
5
"""
class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Tangential spherical cube projection - pixel to sky.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube):
r"""
Tangential spherical cube projection - sky to pixel.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
class HEALPix(Projection):
r"""Base class for HEALPix projections."""
class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix):
r"""
HEALPix - pixel to sky.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = _ParameterDS(
default=4.0, description="The number of facets in longitude direction."
)
X = _ParameterDS(
default=3.0, description="The number of facets in latitude direction."
)
class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix):
r"""
HEALPix projection - sky to pixel.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = _ParameterDS(
default=4.0, description="The number of facets in longitude direction."
)
X = _ParameterDS(
default=3.0, description="The number of facets in latitude direction."
)
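# Sketch (illustrative): the standard HEALPix grid corresponds to the default
# facet counts H=4, X=3; other layouts can be requested explicitly:
#
#     >>> hpx = Pix2Sky_HEALPix(H=4.0, X=3.0)
#     >>> hpx.inverse    # matching Sky2Pix_HEALPix with the same H and X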
class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - sky to pixel.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
class AffineTransformation2D(Model):
"""
Perform an affine transformation in 2 dimensions.
Parameters
----------
matrix : array
A 2x2 matrix specifying the linear transformation to apply to the
inputs
translation : array
A 2-element vector (given either as a 1D array of length 2 or as a 2D
array of shape ``(1, 2)``) specifying a translation to apply to the inputs
"""
n_inputs = 2
n_outputs = 2
standard_broadcasting = False
_separable = False
matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]])
translation = Parameter(default=[0.0, 0.0])
@matrix.validator
def matrix(self, value):
"""Validates that the input matrix is a 2x2 2D array."""
if np.shape(value) != (2, 2):
raise InputParameterError(
"Expected transformation matrix to be a 2x2 array"
)
@translation.validator
def translation(self, value):
"""
Validates that the translation vector has two elements. It may be given
either as a plain 1D vector of shape ``(2,)`` or as a 2D array of shape
``(1, 2)`` (i.e. ``ndim=2`` with a single row).
"""
if not (
(np.ndim(value) == 1 and np.shape(value) == (2,))
or (np.ndim(value) == 2 and np.shape(value) == (1, 2))
):
raise InputParameterError(
"Expected translation vector to be a 2 element row or column "
"vector array"
)
def __init__(self, matrix=matrix, translation=translation, **kwargs):
super().__init__(matrix=matrix, translation=translation, **kwargs)
self.inputs = ("x", "y")
self.outputs = ("x", "y")
@property
def inverse(self):
"""
Inverse transformation.
Raises `~astropy.modeling.InputParameterError` if the transformation cannot be inverted.
"""
det = np.linalg.det(self.matrix.value)
if det == 0:
raise InputParameterError(
f"Transformation matrix is singular; {self.__class__.__name__} model"
" does not have an inverse"
)
matrix = np.linalg.inv(self.matrix.value)
if self.matrix.unit is not None:
matrix = matrix * self.matrix.unit
# If the matrix has a unit, the translation carries the same unit, so no
# unit needs to be attached to the result here.
translation = -np.dot(matrix, self.translation.value)
return self.__class__(matrix=matrix, translation=translation)
@classmethod
def evaluate(cls, x, y, matrix, translation):
"""
Apply the transformation to a set of 2D Cartesian coordinates given as
two arrays--one for the x coordinates and one for the y coordinates--or a
single coordinate pair.
Parameters
----------
x, y : array, float
x and y coordinates
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
shape = x.shape or (1,)
# Use asarray to make sure any units are dropped.
inarr = np.vstack(
[np.asarray(x).ravel(), np.asarray(y).ravel(), np.ones(x.size, x.dtype)]
)
if inarr.shape[0] != 3 or inarr.ndim != 2:
raise ValueError("Incompatible input shapes")
augmented_matrix = cls._create_augmented_matrix(matrix, translation)
result = np.dot(augmented_matrix, inarr)
x, y = result[0], result[1]
x.shape = y.shape = shape
return x, y
@staticmethod
def _create_augmented_matrix(matrix, translation):
unit = None
if any([hasattr(translation, "unit"), hasattr(matrix, "unit")]):
if not all([hasattr(translation, "unit"), hasattr(matrix, "unit")]):
raise ValueError(
"To use AffineTransformation with quantities, "
"both matrix and unit need to be quantities."
)
unit = translation.unit
# matrix should have the same units as translation
if not (matrix.unit / translation.unit) == u.dimensionless_unscaled:
raise ValueError("matrix and translation must have the same units.")
augmented_matrix = np.empty((3, 3), dtype=float)
augmented_matrix[0:2, 0:2] = matrix
augmented_matrix[0:2, 2:].flat = translation
augmented_matrix[2] = [0, 0, 1]
if unit is not None:
return augmented_matrix * unit
return augmented_matrix
@property
def input_units(self):
if self.translation.unit is None and self.matrix.unit is None:
return None
elif self.translation.unit is not None:
return dict(zip(self.inputs, [self.translation.unit] * 2))
else:
return dict(zip(self.inputs, [self.matrix.unit] * 2))
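# Minimal usage sketch (illustrative, values chosen arbitrarily): a 90-degree
# rotation followed by a translation, and its inverse recovering the input:
#
#     >>> aff = AffineTransformation2D(matrix=[[0, -1], [1, 0]], translation=[1, 2])
#     >>> x, y = aff(1.0, 0.0)   # -> (1.0, 3.0)
#     >>> aff.inverse(x, y)      # -> approximately (1.0, 0.0)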
for long_name, short_name in _PROJ_NAME_CODE:
# define short-name projection equivalent classes:
globals()["Pix2Sky_" + short_name] = globals()["Pix2Sky_" + long_name]
globals()["Sky2Pix_" + short_name] = globals()["Sky2Pix_" + long_name]
# set inverse classes:
globals()["Pix2Sky_" + long_name]._inv_cls = globals()["Sky2Pix_" + long_name]
globals()["Sky2Pix_" + long_name]._inv_cls = globals()["Pix2Sky_" + long_name]