# =============================================================================
# content hash: 2400b0d781cdb8fbc8fb677ca43a6cf86c70ea95a3dd445c72a6a7f5c33046e3
# =============================================================================
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import os
import pytest
from astropy import cosmology
from astropy.cosmology import Cosmology, w0wzCDM
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.io.tests import (
test_cosmology,
test_ecsv,
test_html,
test_json,
test_mapping,
test_model,
test_row,
test_table,
test_yaml,
)
from astropy.table import QTable, Row
from astropy.utils.compat.optional_deps import HAS_BS4
###############################################################################
# SETUP
cosmo_instances = cosmology.realizations.available
# Collect the registered read/write formats.
# (format, supports_metadata, has_all_required_dependencies)
readwrite_formats = {
("ascii.ecsv", True, True),
("ascii.html", False, HAS_BS4),
("json", True, True),
}
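# A minimal write/read round-trip sketch for one of the registered formats
# above. Illustrative only (the underscore name keeps pytest from collecting
# it); ``Planck18`` is an assumption here, the real tests use the ``cosmo``
# fixture.
def _example_readwrite_roundtrip(tmp_path):
    from astropy.cosmology import Planck18

    fname = str(tmp_path / "planck18.ecsv")
    Planck18.write(fname, format="ascii.ecsv")  # registered writer
    assert Cosmology.read(fname, format="ascii.ecsv") == Planck18  # registered reader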
# Collect all the registered to/from formats. Unfortunately this is NOT
# automatic since the output format class is not stored on the registry.
# (format, data type)
tofrom_formats = [
("mapping", dict),
("yaml", str),
("astropy.cosmology", Cosmology),
("astropy.row", Row),
("astropy.table", QTable),
]
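# Companion sketch for the in-memory converters listed in ``tofrom_formats``
# (illustrative only; ``Planck18`` is again an assumption).
def _example_tofrom_roundtrip():
    from astropy.cosmology import Planck18

    mapping = Planck18.to_format("mapping")  # plain dict representation
    assert isinstance(mapping, dict)
    assert Cosmology.from_format(mapping, format="mapping") == Planck18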
###############################################################################
class ReadWriteTestMixin(
test_ecsv.ReadWriteECSVTestMixin,
test_html.ReadWriteHTMLTestMixin,
test_json.ReadWriteJSONTestMixin,
):
"""
Tests for a CosmologyRead/Write on a |Cosmology|.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestReadWriteCosmology`` or ``TestCosmology`` for examples.
"""
@pytest.mark.parametrize("format, metaio, has_deps", readwrite_formats)
def test_readwrite_complete_info(self, cosmo, tmp_path, format, metaio, has_deps):
"""
Test writing from an instance and reading from the base class.
This requires full information.
The round-tripped metadata can be in a different order, so the
OrderedDict must be converted to a dict before testing equality.
"""
if not has_deps:
pytest.skip("missing a dependency")
fname = str(tmp_path / f"{cosmo.name}.{format}")
cosmo.write(fname, format=format)
# Also test kwarg "overwrite"
assert os.path.exists(fname) # file exists
with pytest.raises(IOError):
cosmo.write(fname, format=format, overwrite=False)
assert os.path.exists(fname)  # overwrite the existing file
cosmo.write(fname, format=format, overwrite=True)
# Read back
got = Cosmology.read(fname, format=format)
assert got == cosmo
assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
@pytest.mark.parametrize("format, metaio, has_deps", readwrite_formats)
def test_readwrite_from_subclass_complete_info(
self, cosmo_cls, cosmo, tmp_path, format, metaio, has_deps
):
"""
Test writing from an instance and reading from that class, when there's
full information saved.
"""
if not has_deps:
pytest.skip("missing a dependency")
fname = str(tmp_path / f"{cosmo.name}.{format}")
cosmo.write(fname, format=format)
# read with the same class that wrote.
got = cosmo_cls.read(fname, format=format)
assert got == cosmo
assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
# this should be equivalent to
got = Cosmology.read(fname, format=format, cosmology=cosmo_cls)
assert got == cosmo
assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
# and also
got = Cosmology.read(fname, format=format, cosmology=cosmo_cls.__qualname__)
assert got == cosmo
assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
class TestCosmologyReadWrite(ReadWriteTestMixin):
"""Test the classes CosmologyRead/Write."""
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
return getattr(cosmology.realizations, request.param)
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
return cosmo.__class__
# ==============================================================
@pytest.mark.parametrize("format, _, has_deps", readwrite_formats)
def test_write_methods_have_explicit_kwarg_overwrite(self, format, _, has_deps):
if not has_deps:
pytest.skip("missing a dependency")
writer = readwrite_registry.get_writer(format, Cosmology)
# test in signature
sig = inspect.signature(writer)
assert "overwrite" in sig.parameters
# also in docstring
assert "overwrite : bool" in writer.__doc__
@pytest.mark.parametrize("format, _, has_deps", readwrite_formats)
def test_readwrite_reader_class_mismatch(
self, cosmo, tmp_path, format, _, has_deps
):
"""Test when the reader class doesn't match the file."""
if not has_deps:
pytest.skip("missing a dependency")
fname = tmp_path / f"{cosmo.name}.{format}"
cosmo.write(fname, format=format)
# class mismatch
# when reading directly
with pytest.raises(TypeError, match="missing 1 required"):
w0wzCDM.read(fname, format=format)
with pytest.raises(TypeError, match="missing 1 required"):
Cosmology.read(fname, format=format, cosmology=w0wzCDM)
# when specifying the class
with pytest.raises(ValueError, match="`cosmology` must be either"):
w0wzCDM.read(fname, format=format, cosmology="FlatLambdaCDM")
###############################################################################
# To/From_Format Tests
class ToFromFormatTestMixin(
test_cosmology.ToFromCosmologyTestMixin,
test_mapping.ToFromMappingTestMixin,
test_model.ToFromModelTestMixin,
test_row.ToFromRowTestMixin,
test_table.ToFromTableTestMixin,
test_yaml.ToFromYAMLTestMixin,
):
"""
Tests for a Cosmology[To/From]Format on a |Cosmology|.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.mark.parametrize("format, totype", tofrom_formats)
def test_tofromformat_complete_info(
self, cosmo, format, totype, xfail_if_not_registered_with_yaml
):
"""Read tests happen later."""
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# test from_format
got = Cosmology.from_format(obj, format=format)
# Test autodetect, if enabled
if self.can_autodentify(format):
got2 = Cosmology.from_format(obj)
assert got2 == got # internal consistency
assert got == cosmo # external consistency
assert got.meta == cosmo.meta
@pytest.mark.parametrize("format, totype", tofrom_formats)
def test_fromformat_subclass_complete_info(
self, cosmo_cls, cosmo, format, totype, xfail_if_not_registered_with_yaml
):
"""
Test transforming an instance and parsing from that class, when there's
full information available.
Partial information tests are handled in the Mixin super classes.
"""
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# read with the same class that wrote.
got = cosmo_cls.from_format(obj, format=format)
if self.can_autodentify(format):
got2 = Cosmology.from_format(obj) # and autodetect
assert got2 == got # internal consistency
assert got == cosmo # external consistency
assert got.meta == cosmo.meta
# this should be equivalent to
got = Cosmology.from_format(obj, format=format, cosmology=cosmo_cls)
assert got == cosmo
assert got.meta == cosmo.meta
# and also
got = Cosmology.from_format(
obj, format=format, cosmology=cosmo_cls.__qualname__
)
assert got == cosmo
assert got.meta == cosmo.meta
class TestCosmologyToFromFormat(ToFromFormatTestMixin):
"""Test Cosmology[To/From]Format classes."""
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
return getattr(cosmology.realizations, request.param)
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
return cosmo.__class__
# ==============================================================
@pytest.mark.parametrize("format_type", tofrom_formats)
def test_fromformat_class_mismatch(self, cosmo, format_type):
format, totype = format_type
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# class mismatch
with pytest.raises(TypeError):
w0wzCDM.from_format(obj, format=format)
with pytest.raises(TypeError):
Cosmology.from_format(obj, format=format, cosmology=w0wzCDM)
# when specifying the class
with pytest.raises(ValueError, match="`cosmology` must be either"):
w0wzCDM.from_format(obj, format=format, cosmology="FlatLambdaCDM")

# =============================================================================
# content hash: a452155d2815e68ed66f0a085ee524754087b8ec80f6e691ccf22cd7d69d258b
# =============================================================================
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.core`."""
##############################################################################
# IMPORTS
# STDLIB
import abc
import inspect
import pickle
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Cosmology, FlatCosmologyMixin
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter
from astropy.table import Column, QTable, Table
from astropy.utils.compat import PYTHON_LT_3_11
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.metadata import MetaData
from .test_connect import ReadWriteTestMixin, ToFromFormatTestMixin
from .test_parameter import ParameterTestMixin
##############################################################################
# SETUP / TEARDOWN
scalar_zs = [
0,
1,
1100, # interesting times
# FIXME! np.inf breaks some funcs. 0 * inf is an error
np.float64(3300), # different type
2 * cu.redshift,
3 * u.one, # compatible units
]
_zarr = np.linspace(0, 1e5, num=20)
array_zs = [
_zarr, # numpy
_zarr.tolist(), # pure python
Column(_zarr), # table-like
_zarr * cu.redshift, # Quantity
]
valid_zs = scalar_zs + array_zs
invalid_zs = [
(None, TypeError), # wrong type
# Wrong units (TypeError is included because the Cython implementation can differ)
(4 * u.MeV, (u.UnitConversionError, TypeError)), # scalar
([0, 1] * u.m, (u.UnitConversionError, TypeError)), # array
]
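# Sketch of how ``valid_zs`` / ``invalid_zs`` are consumed by the parametrized
# tests below (illustrative only; ``Planck18`` is an assumption here, the real
# tests run against the ``cosmo`` fixture).
def _example_redshift_inputs():
    from astropy.cosmology import Planck18

    for z in valid_zs:
        Planck18.scale_factor(z)  # every valid redshift form is accepted
    for z, exc in invalid_zs:
        with pytest.raises(exc):
            Planck18.scale_factor(z)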
class SubCosmology(Cosmology):
"""Defined here to be serializable."""
H0 = Parameter(unit="km/(s Mpc)")
Tcmb0 = Parameter(unit=u.K)
m_nu = Parameter(unit=u.eV)
def __init__(self, H0, Tcmb0=0 * u.K, m_nu=0 * u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
self.H0 = H0
self.Tcmb0 = Tcmb0
self.m_nu = m_nu
@property
def is_flat(self):
return super().is_flat()
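# Quick instantiation sketch for ``SubCosmology`` (illustrative; the values
# mirror those used in ``TestCosmology.setup_class`` below).
def _example_subcosmology():
    sc = SubCosmology(
        70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, m_nu=0.6 * u.eV, name="demo"
    )
    assert sc.name == "demo"
    assert sc.H0 == 70 * (u.km / u.s / u.Mpc)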
##############################################################################
# TESTS
##############################################################################
class MetaTestMixin:
"""Tests for a :class:`astropy.utils.metadata.MetaData` on a Cosmology."""
def test_meta_on_class(self, cosmo_cls):
assert isinstance(cosmo_cls.meta, MetaData)
def test_meta_on_instance(self, cosmo):
assert isinstance(cosmo.meta, dict) # test type
# value set at initialization
assert cosmo.meta == self.cls_kwargs.get("meta", {})
def test_meta_mutable(self, cosmo):
"""The metadata is NOT immutable on a cosmology"""
key = tuple(cosmo.meta.keys())[0] # select some key
cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable
class TestCosmology(
ParameterTestMixin,
MetaTestMixin,
ReadWriteTestMixin,
ToFromFormatTestMixin,
metaclass=abc.ABCMeta,
):
"""Test :class:`astropy.cosmology.Cosmology`.
Subclasses should define tests for:
- ``test_clone_change_param()``
- ``test_repr()``
"""
def setup_class(self):
"""
Setup for testing.
Cosmology should not be instantiated, so tests are done on a subclass.
"""
# make sure SubCosmology is known
_COSMOLOGY_CLASSES["SubCosmology"] = SubCosmology
self.cls = SubCosmology
self._cls_args = dict(
H0=70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, m_nu=0.6 * u.eV
)
self.cls_kwargs = dict(name=self.__class__.__name__, meta={"a": "b"})
def teardown_class(self):
_COSMOLOGY_CLASSES.pop("SubCosmology", None)
@property
def cls_args(self):
return tuple(self._cls_args.values())
@pytest.fixture(scope="class")
def cosmo_cls(self):
"""The Cosmology class as a :func:`pytest.fixture`."""
return self.cls
@pytest.fixture(scope="function") # ensure not cached.
def ba(self):
"""Return filled `inspect.BoundArguments` for cosmology."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return ba
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
"""The cosmology instance with which to test."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return cosmo_cls(*ba.args, **ba.kwargs)
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test creating subclasses registers classes and manages Parameters."""
class InitSubclassTest(cosmo_cls):
pass
# test parameters
assert InitSubclassTest.__parameters__ == cosmo_cls.__parameters__
# test and cleanup registry
registrant = _COSMOLOGY_CLASSES.pop(InitSubclassTest.__qualname__)
assert registrant is InitSubclassTest
def test_init_signature(self, cosmo_cls, cosmo):
"""Test class-property ``_init_signature``."""
# test presence
assert hasattr(cosmo_cls, "_init_signature")
assert hasattr(cosmo, "_init_signature")
# test internal consistency, so following tests can use either cls or instance.
assert cosmo_cls._init_signature == cosmo._init_signature
# test matches __init__, but without 'self'
sig = inspect.signature(cosmo.__init__) # (instances don't have self)
assert set(sig.parameters.keys()) == set(
cosmo._init_signature.parameters.keys()
)
assert all(
np.all(sig.parameters[k].default == p.default)
for k, p in cosmo._init_signature.parameters.items()
)
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
"""Test initialization."""
# Cosmology only does name and meta, but this subclass adds H0, Tcmb0 & m_nu.
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta={"m": 1})
assert cosmo.name == "test_init"
assert cosmo.meta["m"] == 1
# if meta is None, it is changed to a dict
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta=None)
assert cosmo.meta == {}
def test_name(self, cosmo):
"""Test property ``name``."""
assert cosmo.name is cosmo._name # accesses private attribute
assert cosmo.name is None or isinstance(cosmo.name, str) # type
assert cosmo.name == self.cls_kwargs["name"] # test has expected value
# immutable
match = (
"can't set"
if PYTHON_LT_3_11
else f"property 'name' of {cosmo.__class__.__name__!r} object has no setter"
)
with pytest.raises(AttributeError, match=match):
cosmo.name = None
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``. It's an ABC."""
with pytest.raises(NotImplementedError, match="is_flat is not implemented"):
cosmo.is_flat
# ------------------------------------------------
# clone
def test_clone_identical(self, cosmo):
"""Test method ``.clone()`` if no (kw)args."""
assert cosmo.clone() is cosmo
def test_clone_name(self, cosmo):
"""Test method ``.clone()`` name argument."""
# test changing name. clone treats 'name' differently (see next test)
c = cosmo.clone(name="cloned cosmo")
assert c.name == "cloned cosmo" # changed
# show name is the only thing changed
c._name = cosmo.name # first change name back
assert c == cosmo
assert c.meta == cosmo.meta
# now change a different parameter and see how 'name' changes
c = cosmo.clone(meta={"test_clone_name": True})
assert c.name == cosmo.name + " (modified)"
def test_clone_meta(self, cosmo):
"""Test method ``.clone()`` meta argument: updates meta, doesn't clear."""
# start with no change
c = cosmo.clone(meta=None)
assert c.meta == cosmo.meta
# add something
c = cosmo.clone(meta=dict(test_clone_meta=True))
assert c.meta["test_clone_meta"] is True
c.meta.pop("test_clone_meta") # remove from meta
assert c.meta == cosmo.meta # now they match
def test_clone_change_param(self, cosmo):
"""
Test method ``.clone()`` changing one or more Parameters.
Nothing here because this cosmology has no Parameters.
"""
def test_clone_fail_unexpected_arg(self, cosmo):
"""Test when ``.clone()`` gets an unexpected argument."""
with pytest.raises(TypeError, match="unexpected keyword argument"):
cosmo.clone(not_an_arg=4)
def test_clone_fail_positional_arg(self, cosmo):
with pytest.raises(TypeError, match="1 positional argument"):
cosmo.clone(None)
# ---------------------------------------------------------------
# comparison methods
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`."""
# to self
assert cosmo.is_equivalent(cosmo)
# same class, different instance
newclone = cosmo.clone(name="test_is_equivalent")
assert cosmo.is_equivalent(newclone)
assert newclone.is_equivalent(cosmo)
# different class and not convertible to Cosmology.
assert not cosmo.is_equivalent(2)
def test_equality(self, cosmo):
"""Test method ``.__eq__()."""
# wrong class
assert (cosmo != 2) and (2 != cosmo)
# correct
assert cosmo == cosmo
# different name => not equal, but still equivalent
newcosmo = cosmo.clone(name="test_equality")
assert (cosmo != newcosmo) and (newcosmo != cosmo)
assert cosmo.__equiv__(newcosmo) and newcosmo.__equiv__(cosmo)
# ---------------------------------------------------------------
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``.
This is a very general test and it is probably good to have a
hard-coded comparison.
"""
r = repr(cosmo)
# class in string rep
assert cosmo_cls.__qualname__ in r
assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing
r = r[len(cosmo_cls.__qualname__) + 1 :] # remove
# name in string rep
if cosmo.name is not None:
assert f'name="{cosmo.name}"' in r
assert r.index("name=") == 0
r = r[6 + len(cosmo.name) + 3 :] # remove
# parameters in string rep
ps = {k: getattr(cosmo, k) for k in cosmo.__parameters__}
for k, v in ps.items():
sv = f"{k}={v}"
assert sv in r
assert r.index(k) == 0
r = r[len(sv) + 2 :] # remove
# ------------------------------------------------
@pytest.mark.parametrize("in_meta", [True, False])
@pytest.mark.parametrize("table_cls", [Table, QTable])
def test_astropy_table(self, cosmo, table_cls, in_meta):
"""Test ``astropy.table.Table(cosmology)``."""
tbl = table_cls(cosmo, cosmology_in_meta=in_meta)
assert isinstance(tbl, table_cls)
# the name & all parameters are columns
for n in ("name", *cosmo.__parameters__):
assert n in tbl.colnames
assert np.all(tbl[n] == getattr(cosmo, n))
# check if Cosmology is in metadata or a column
if in_meta:
assert tbl.meta["cosmology"] == cosmo.__class__.__qualname__
assert "cosmology" not in tbl.colnames
else:
assert "cosmology" not in tbl.meta
assert tbl["cosmology"][0] == cosmo.__class__.__qualname__
# the metadata is transferred
for k, v in cosmo.meta.items():
assert np.all(tbl.meta[k] == v)
# ===============================================================
# Usage Tests
def test_immutability(self, cosmo):
"""
Test immutability of cosmologies.
The metadata is mutable: see ``test_meta_mutable``.
"""
for n in cosmo.__all_parameters__:
with pytest.raises(AttributeError):
setattr(cosmo, n, getattr(cosmo, n))
def test_pickle_class(self, cosmo_cls, pickle_protocol):
"""Test classes can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo_cls, protocol=pickle_protocol)
unpickled = pickle.loads(f)
# test equality
assert unpickled == cosmo_cls
def test_pickle_instance(self, cosmo, pickle_protocol):
"""Test instances can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == cosmo
assert unpickled.meta == cosmo.meta
class CosmologySubclassTest(TestCosmology):
"""
Test subclasses of :class:`astropy.cosmology.Cosmology`.
This is broken away from ``TestCosmology``, because |Cosmology| is/will be
an ABC and subclasses must override some methods.
"""
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# instance-level
@abc.abstractmethod
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# -----------------------------------------------------------------------------
class FlatCosmologyMixinTest:
"""Tests for :class:`astropy.cosmology.core.FlatCosmologyMixin` subclasses.
The test suite structure mirrors the implementation of the tested code.
Just like :class:`astropy.cosmology.FlatCosmologyMixin` is an abstract
base class (ABC) that cannot be used by itself, so too is this corresponding
test class an ABC mixin.
E.g. to use this class::
class TestFlatSomeCosmology(FlatCosmologyMixinTest, TestSomeCosmology):
...
"""
def test_nonflat_class_(self, cosmo_cls, cosmo):
"""Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat_cls`."""
# Test it's a method on the class
assert issubclass(cosmo_cls, cosmo_cls.__nonflatclass__)
# It also works from the instance. # TODO! as a "metaclassmethod"
assert issubclass(cosmo_cls, cosmo.__nonflatclass__)
# Maybe not the most robust test, but so far all Flat classes have the
# name of their parent class.
assert cosmo.__nonflatclass__.__name__ in cosmo_cls.__name__
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
super().test_is_flat(cosmo_cls, cosmo)
# it's always True
assert cosmo.is_flat is True
def test_nonflat(self, cosmo):
"""Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat`."""
assert cosmo.nonflat.is_equivalent(cosmo)
assert cosmo.is_equivalent(cosmo.nonflat)
# ------------------------------------------------
# clone
def test_clone_to_nonflat_equivalent(self, cosmo):
"""Test method ``.clone()``to_nonflat argument."""
# just converting the class
nc = cosmo.clone(to_nonflat=True)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
@abc.abstractmethod
def test_clone_to_nonflat_change_param(self, cosmo):
"""
Test method ``.clone()`` changing one or more Parameters. No parameters
are changed here because FlatCosmologyMixin has no Parameters.
See class docstring for why this test method exists.
"""
# send to non-flat
nc = cosmo.clone(to_nonflat=True)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
# ------------------------------------------------
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.is_equivalent`.
Normally this would pass up via super(), but ``__equiv__`` is meant
to be overridden, so we skip super().
e.g. FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestCosmology
vs FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestFLRW -> TestCosmology
"""
CosmologySubclassTest.test_is_equivalent(self, cosmo)
# See FlatFLRWMixinTest for tests. It's a bit hard here since this class
# is for an ABC.
# ===============================================================
# Usage Tests
def test_subclassing(self, cosmo_cls):
"""Test when subclassing a flat cosmology."""
class SubClass1(cosmo_cls):
pass
# The classes have the same non-flat parent class
assert SubClass1.__nonflatclass__ is cosmo_cls.__nonflatclass__
# A more complex example is when Mixin classes are used.
class Mixin:
pass
class SubClass2(Mixin, cosmo_cls):
pass
# The classes have the same non-flat parent class
assert SubClass2.__nonflatclass__ is cosmo_cls.__nonflatclass__
# The order of the Mixin should not matter
class SubClass3(cosmo_cls, Mixin):
pass
# The classes have the same non-flat parent class
assert SubClass3.__nonflatclass__ is cosmo_cls.__nonflatclass__
def test__nonflatclass__multiple_nonflat_inheritance():
"""
Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.__nonflatclass__`
when there's more than one non-flat class in the inheritance.
"""
# Define a non-operable minimal subclass of Cosmology.
class SubCosmology2(Cosmology):
def __init__(self, H0, Tcmb0=0 * u.K, m_nu=0 * u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
@property
def is_flat(self):
return False
# Now make an ambiguous flat cosmology from the two SubCosmologies
with pytest.raises(TypeError, match="cannot create a consistent non-flat class"):
class FlatSubCosmology(FlatCosmologyMixin, SubCosmology, SubCosmology2):
@property
def nonflat(self):
pass
# -----------------------------------------------------------------------------
def test_flrw_moved_deprecation():
"""Test the deprecation warning about the move of FLRW classes."""
from astropy.cosmology import flrw
# it's deprecated to import `flrw/*` from `core.py`
with pytest.warns(AstropyDeprecationWarning):
from astropy.cosmology.core import FLRW
# but they are the same object
assert FLRW is flrw.FLRW

# =============================================================================
# content hash: 455821f6d63c20510ca5cd5248f6456057e6d1e0382213049d9f60b353ed6576
# =============================================================================
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the cosmology test suite
from the installed astropy. It makes use of the `pytest`_ testing framework.
"""
##############################################################################
# IMPORTS
# STDLIB
import inspect
# THIRD PARTY
import pytest
# LOCAL
from astropy.cosmology import core
__all__ = ["get_redshift_methods", "clean_registry"]
###############################################################################
# FUNCTIONS
def get_redshift_methods(cosmology, include_private=True, include_z2=True):
"""Get redshift methods from a cosmology.
Parameters
----------
cosmology : |Cosmology| class or instance
include_private : bool
Whether to include private methods, i.e. those starting with an underscore.
include_z2 : bool
Whether to include methods that are functions of 2 (or more) redshifts,
not the more common 1 redshift argument.
Returns
-------
set[str]
The names of the redshift methods on `cosmology`, satisfying
`include_private` and `include_z2`.
"""
# Get all the method names, optionally sieving out private methods
methods = set()
for n in dir(cosmology):
try: # get method, some will error on ABCs
m = getattr(cosmology, n)
except NotImplementedError:
continue
# Add anything callable, optionally excluding private methods.
if callable(m) and (not n.startswith("_") or include_private):
methods.add(n)
# Sieve out incompatible methods.
# The index to check for redshift depends on whether cosmology is a class
# or instance and does/doesn't include 'self'.
iz1 = 1 if inspect.isclass(cosmology) else 0
for n in tuple(methods):
try:
sig = inspect.signature(getattr(cosmology, n))
except ValueError: # Remove non-introspectable methods.
methods.discard(n)
continue
else:
params = list(sig.parameters.keys())
# Remove non-redshift methods:
if len(params) <= iz1: # Check there are enough arguments.
methods.discard(n)
elif len(params) >= iz1 + 1 and not params[iz1].startswith(
"z"
): # First non-self arg is z.
methods.discard(n)
# If methods with 2 z args are not allowed, the following arg is checked.
elif (
not include_z2
and (len(params) >= iz1 + 2)
and params[iz1 + 1].startswith("z")
):
methods.discard(n)
return methods
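# Example call (illustrative; the exact set of names depends on the cosmology
# class and astropy version, so only a well-known member is checked here).
def _example_get_redshift_methods():
    from astropy.cosmology import FlatLambdaCDM

    names = get_redshift_methods(FlatLambdaCDM, include_private=False, include_z2=False)
    assert "comoving_distance" in names
    assert not any(n.startswith("_") for n in names)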
###############################################################################
# FIXTURES
@pytest.fixture
def clean_registry():
"""`pytest.fixture` for clearing and restoring ``_COSMOLOGY_CLASSES``."""
# TODO! use monkeypatch instead, for thread safety.
ORIGINAL_COSMOLOGY_CLASSES = core._COSMOLOGY_CLASSES
core._COSMOLOGY_CLASSES = {} # set as empty dict
yield core._COSMOLOGY_CLASSES
core._COSMOLOGY_CLASSES = ORIGINAL_COSMOLOGY_CLASSES
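# Usage sketch (illustrative): a test that requests ``clean_registry`` starts
# from an emptied registry; anything registered during the test is discarded
# when the original ``_COSMOLOGY_CLASSES`` mapping is restored.
def _example_clean_registry_usage(clean_registry):
    assert clean_registry == {}  # swapped for an empty dict for this test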

# =============================================================================
# content hash: 43414ab07f3f7e3030f0e01df5e12c87242c76ea280735c4e73155384dc6bc75
# =============================================================================
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Stand-alone overall systems tests for :mod:`astropy.cosmology`."""
from io import StringIO
import numpy as np
import pytest
import astropy.constants as const
import astropy.units as u
from astropy.cosmology import flrw
from astropy.cosmology.realizations import Planck18
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.")
def test_flat_z1():
"""Test a flat cosmology at z=1 against several other on-line calculators.
Test values were taken from the following web cosmology calculators on
2012-02-11:
Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
(https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)
Kempner: http://www.kempner.net/cosmic.php
iCosmos: http://www.icosmos.co.uk/index.html
"""
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
# The order of values below is Wright, Kempner, iCosmos'
assert allclose(
cosmo.comoving_distance(1), [3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4
)
assert allclose(
cosmo.angular_diameter_distance(1),
[1682.3, 1682.4, 1682.3994] * u.Mpc,
rtol=1e-4,
)
assert allclose(
cosmo.luminosity_distance(1), [6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4
)
assert allclose(cosmo.lookback_time(1), [7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3)
assert allclose(
cosmo.lookback_distance(1), [2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.")
def test_varyde_lumdist_mathematica():
"""Tests a few varying dark energy EOS models against a Mathematica computation."""
z = np.array([0.2, 0.4, 0.9, 1.2])
# w0wa models
cosmo = flrw.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)
assert allclose(
cosmo.luminosity_distance(z),
[1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc,
rtol=1e-4,
)
assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
assert allclose(
cosmo.de_density_scale([0.0, 0.5, 1.5]),
[1.0, 0.9246310669529021, 0.9184087000251957],
)
cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
assert allclose(
cosmo.luminosity_distance(z),
[971.667, 2141.67, 5685.96, 8107.41] * u.Mpc,
rtol=1e-4,
)
cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5, Tcmb0=0.0)
assert allclose(
cosmo.luminosity_distance(z),
[974.087, 2157.08, 5783.92, 8274.08] * u.Mpc,
rtol=1e-4,
)
# wpwa models
cosmo = flrw.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5, Tcmb0=0.0)
assert allclose(
cosmo.luminosity_distance(z),
[1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc,
rtol=1e-4,
)
cosmo = flrw.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9, Tcmb0=0.0)
assert allclose(
cosmo.luminosity_distance(z),
[1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc,
rtol=1e-4,
)
###############################################################################
# TODO! sort and refactor following tests.
# overall systems tests stay here, specific tests go to new test suite.
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_units():
"""Test if the right units are being returned"""
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)
assert cosmo.comoving_distance(1.0).unit == u.Mpc
assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.luminosity_distance(1.0).unit == u.Mpc
assert cosmo.lookback_time(1.0).unit == u.Gyr
assert cosmo.lookback_distance(1.0).unit == u.Mpc
assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
assert cosmo.Tcmb(1.0).unit == u.K
assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
assert cosmo.Tnu(1.0).unit == u.K
assert cosmo.Tnu([0.0, 1.0]).unit == u.K
assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.critical_density(1.0).unit == u.g / u.cm**3
assert cosmo.comoving_volume(1.0).unit == u.Mpc**3
assert cosmo.age(1.0).unit == u.Gyr
assert cosmo.distmod(1.0).unit == u.mag
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distance_broadcast():
"""Test array shape broadcasting for functions with single
redshift inputs"""
cosmo = flrw.FlatLambdaCDM(
H0=70, Om0=0.27, m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV)
)
z = np.linspace(0.1, 1, 6)
z_reshape2d = z.reshape(2, 3)
z_reshape3d = z.reshape(3, 2, 1)
# Things with units
methods = [
"comoving_distance",
"luminosity_distance",
"comoving_transverse_distance",
"angular_diameter_distance",
"distmod",
"lookback_time",
"age",
"comoving_volume",
"differential_comoving_volume",
"kpc_comoving_per_arcmin",
]
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert value_flat.unit == value_2d.unit
assert value_flat.unit == value_3d.unit
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Also test unitless ones
methods = [
"absorption_distance",
"Om",
"Ode",
"Ok",
"H",
"w",
"de_density_scale",
"Onu",
"Ogamma",
"nu_relative_density",
]
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Test some dark energy models
methods = ["Om", "Ode", "w", "de_density_scale"]
for tcosmo in [
flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),
flrw.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),
flrw.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),
flrw.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5, wp=-1.2, wa=-0.2, zp=0.9),
flrw.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1),
]:
for method in methods:
g = getattr(tcosmo, method)  # use the dark energy model from the loop
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
def test_equality():
"""Test equality and equivalence."""
# mismatched signatures, both directions.
newcosmo = flrw.w0waCDM(**Planck18._init_arguments, Ode0=0.6)
assert newcosmo != Planck18
assert Planck18 != newcosmo
def test_xtfuncs():
"""Test of absorption and lookback integrand"""
cosmo = flrw.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
z = np.array([2.0, 3.2])
assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378, rtol=1e-4)
assert allclose(
cosmo.lookback_time_integrand(z), [0.10333179, 0.04644541], rtol=1e-4
)
assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402, rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(z), [2.7899584, 3.44104758], rtol=1e-4)
# This class is to test whether the routines work correctly
# if one only overloads w(z)
class test_cos_sub(flrw.FLRW):
def __init__(self):
super().__init__(70.0, 0.27, 0.73, Tcmb0=0.0, name="test_cos")
self._w0 = -0.9
def w(self, z):
return self._w0 * np.ones_like(z)
# Similar, but with neutrinos
class test_cos_subnu(flrw.FLRW):
def __init__(self):
super().__init__(
70.0, 0.27, 0.73, Tcmb0=3.0, m_nu=0.1 * u.eV, name="test_cos_nu"
)
self._w0 = -0.8
def w(self, z):
return self._w0 * np.ones_like(z)
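# Quick sketch of what overriding only ``w(z)`` provides (illustrative; the
# derived distances for these subclasses are checked in ``test_de_subclass``).
def _example_constant_w():
    tc = test_cos_sub()
    assert np.allclose(tc.w([0.0, 1.0, 2.0]), -0.9)  # constant equation of state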
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_de_subclass():
# This is the comparison object
z = [0.2, 0.4, 0.6, 0.9]
cosmo = flrw.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)
# Values taken from Ned Wright's advanced cosmology calculator, Aug 17 2012
assert allclose(
cosmo.luminosity_distance(z), [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3
)
# Now try the subclass that only gives w(z)
cosmo = test_cos_sub()
assert allclose(
cosmo.luminosity_distance(z), [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3
)
# Test efunc
assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
assert allclose(cosmo.efunc([0.5, 1.0]), [1.31744953, 1.7489240754], rtol=1e-5)
assert allclose(cosmo.inv_efunc([0.5, 1.0]), [0.75904236, 0.57178011], rtol=1e-5)
# Test de_density_scale
assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
assert allclose(
cosmo.de_density_scale([0.5, 1.0]), [1.12934694, 1.23114444], rtol=1e-4
)
# Add neutrinos for efunc, inv_efunc
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_matter():
# Test non-relativistic matter evolution
tcos = flrw.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
assert allclose(tcos.Om0, 0.3)
assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
assert allclose(tcos.Om(0), 0.3)
assert allclose(tcos.Ob(0), 0.045)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455], rtol=1e-4)
assert allclose(tcos.Ob(z), [0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4)
assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636], rtol=1e-4)
# Consistency of dark and baryonic matter evolution with all
# non-relativistic matter
assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_ocurv():
# Test Ok evolution
# Flat, boring case
tcos = flrw.FlatLambdaCDM(70.0, 0.3)
assert allclose(tcos.Ok0, 0.0)
assert allclose(tcos.Ok(0), 0.0)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0], rtol=1e-6)
# Not flat
tcos = flrw.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
assert allclose(tcos.Ok0, 0.2)
assert allclose(tcos.Ok(0), 0.2)
assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692], rtol=1e-4)
# Test the sum; note that Ogamma/Onu are 0
assert allclose(
tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z), [1.0, 1.0, 1.0, 1.0], rtol=1e-5
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_ode():
# Test Ode evolution, turn off neutrinos, cmb
tcos = flrw.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
assert allclose(tcos.Ode0, 0.7)
assert allclose(tcos.Ode(0), 0.7)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545], rtol=1e-5)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_ogamma():
"""Tests the effects of changing the temperature of the CMB"""
# Tested against Ned Wright's advanced cosmology calculator,
# Sep 7 2012. The accuracy of our comparison is limited by
# how many digits it outputs, which limits our test to about
# 0.2% accuracy. The NWACC does not allow one
# to change the number of neutrino species, fixing that at 3.
# Also, inspection of the NWACC code shows it uses inaccurate
# constants at the 0.2% level (specifically, a_B),
# so we shouldn't expect to match it that well. The integral is
# also done rather crudely. Therefore, we should not expect
# the NWACC to be accurate to better than about 0.5%, which is
# unfortunate, but reflects a problem with it rather than this code.
# More accurate tests below using Mathematica
z = np.array([1.0, 10.0, 500.0, 1000.0])
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
assert allclose(
cosmo.angular_diameter_distance(z),
[1651.9, 858.2, 26.855, 13.642] * u.Mpc,
rtol=5e-4,
)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
assert allclose(
cosmo.angular_diameter_distance(z),
[1651.8, 857.9, 26.767, 13.582] * u.Mpc,
rtol=5e-4,
)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
assert allclose(
cosmo.angular_diameter_distance(z),
[1651.4, 856.6, 26.489, 13.405] * u.Mpc,
rtol=5e-4,
)
# Next compare with doing the integral numerically in Mathematica,
# which allows more precision in the test. It is at least as
# good as 0.01%, possibly better
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
assert allclose(
cosmo.angular_diameter_distance(z),
[1651.91, 858.205, 26.8586, 13.6469] * u.Mpc,
rtol=1e-5,
)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
assert allclose(
cosmo.angular_diameter_distance(z),
[1651.76, 857.817, 26.7688, 13.5841] * u.Mpc,
rtol=1e-5,
)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
assert allclose(
cosmo.angular_diameter_distance(z),
[1651.21, 856.411, 26.4845, 13.4028] * u.Mpc,
rtol=1e-5,
)
# Just to be really sure, we also do a version where the integral
# is analytic, which is a Ode = 0 flat universe. In this case
# Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
# Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0**3 * 2.725**4 / 1.87837e-26
Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
Or0 = (Ogamma0h2 + Onu0h2) / 0.7**2
Om0 = 1.0 - Or0
hubdis = (299792.458 / 70.0) * u.Mpc
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
targvals = 2.0 * hubdis * (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
# And integers for z
assert allclose(cosmo.comoving_distance(z.astype(int)), targvals, rtol=1e-5)
# Try Tcmb0 = 4
Or0 *= (4.0 / 2.725) ** 4
Om0 = 1.0 - Or0
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
targvals = 2.0 * hubdis * (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_tcmb():
cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)
assert allclose(cosmo.Tcmb0, 2.5 * u.K)
assert allclose(cosmo.Tcmb(2), 7.5 * u.K)
z = [0.0, 1.0, 2.0, 3.0, 9.0]
assert allclose(cosmo.Tcmb(z), [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
# Make sure it's the same for integers
z = [0, 1, 2, 3, 9]
assert allclose(cosmo.Tcmb(z), [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_tnu():
cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)
assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)
z = [0.0, 1.0, 2.0, 3.0]
expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
# Test for integers
z = [0, 1, 2, 3]
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_efunc_vs_invefunc_flrw():
"""Test that efunc and inv_efunc give inverse values"""
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# FLRW is abstract, so requires test_cos_sub defined earlier
# This requires scipy, unlike the built-ins, because it
# calls de_density_scale, which has an integral in it
cosmo = test_cos_sub()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# Add neutrinos
cosmo = test_cos_subnu()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_kpc_methods():
cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(cosmo.arcsec_per_kpc_comoving(3), 0.0317179167 * u.arcsec / u.kpc)
assert allclose(cosmo.arcsec_per_kpc_proper(3), 0.1268716668 * u.arcsec / u.kpc)
assert allclose(cosmo.kpc_comoving_per_arcmin(3), 1891.6753126 * u.kpc / u.arcmin)
assert allclose(cosmo.kpc_proper_per_arcmin(3), 472.918828 * u.kpc / u.arcmin)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_comoving_volume():
c_flat = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test against Ned Wright's calculator (cubic Gpc)
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = (
np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802]) * u.Gpc**3
)
wright_open = (
np.array([20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814]) * u.Gpc**3
)
wright_closed = (
np.array([12.619, 44.708, 114.904, 173.709, 258.82, 358.992]) * u.Gpc**3
)
# The Wright calculator isn't very accurate, so we use a rather
# modest precision
assert allclose(c_flat.comoving_volume(redshifts), wright_flat, rtol=1e-2)
assert allclose(c_open.comoving_volume(redshifts), wright_open, rtol=1e-2)
assert allclose(c_closed.comoving_volume(redshifts), wright_closed, rtol=1e-2)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_differential_comoving_volume():
from scipy.integrate import quad
c_flat = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test that integration of differential_comoving_volume()
# yields same as comoving_volume()
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = (
np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802]) * u.Gpc**3
)
wright_open = (
np.array([20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814]) * u.Gpc**3
)
wright_closed = (
np.array([12.619, 44.708, 114.904, 173.709, 258.82, 358.992]) * u.Gpc**3
)
# The Wright calculator isn't very accurate, so we use a rather
# modest precision.
def ftemp(x):
return c_flat.differential_comoving_volume(x).value
def otemp(x):
return c_open.differential_comoving_volume(x).value
def ctemp(x):
return c_closed.differential_comoving_volume(x).value
# Multiply by solid_angle (4 * pi)
assert allclose(
np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0] for redshift in redshifts])
* u.Mpc**3,
wright_flat,
rtol=1e-2,
)
assert allclose(
np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0] for redshift in redshifts])
* u.Mpc**3,
wright_open,
rtol=1e-2,
)
assert allclose(
np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0] for redshift in redshifts])
* u.Mpc**3,
wright_closed,
rtol=1e-2,
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_flat_open_closed_icosmo():
"""Test against the tabulated values generated from icosmo.org
with three example cosmologies (flat, open and closed).
"""
cosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 669.77536 576.15085 778.61386
0.32500000 1285.5964 970.26143 1703.4152
0.50000000 1888.6254 1259.0836 2832.9381
0.66250000 2395.5489 1440.9317 3982.6000
0.82500000 2855.5732 1564.6976 5211.4210
1.0000000 3303.8288 1651.9144 6607.6577
1.1625000 3681.1867 1702.2829 7960.5663
1.3250000 4025.5229 1731.4077 9359.3408
1.5000000 4363.8558 1745.5423 10909.640
1.6625000 4651.4830 1747.0359 12384.573
1.8250000 4916.5970 1740.3883 13889.387
2.0000000 5179.8621 1726.6207 15539.586
2.1625000 5406.0204 1709.4136 17096.540
2.3250000 5616.5075 1689.1752 18674.888
2.5000000 5827.5418 1665.0120 20396.396
2.6625000 6010.4886 1641.0890 22013.414
2.8250000 6182.1688 1616.2533 23646.796
3.0000000 6355.6855 1588.9214 25422.742
3.1625000 6507.2491 1563.3031 27086.425
3.3250000 6650.4520 1537.6768 28763.205
3.5000000 6796.1499 1510.2555 30582.674
3.6625000 6924.2096 1485.0852 32284.127
3.8250000 7045.8876 1460.2876 33996.408
4.0000000 7170.3664 1434.0733 35851.832
4.1625000 7280.3423 1410.2358 37584.767
4.3250000 7385.3277 1386.9160 39326.870
4.5000000 7493.2222 1362.4040 41212.722
4.6625000 7588.9589 1340.2135 42972.480
"""
cosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 643.08185 553.18868 747.58265
0.32500000 1200.9858 906.40441 1591.3062
0.50000000 1731.6262 1154.4175 2597.4393
0.66250000 2174.3252 1307.8648 3614.8157
0.82500000 2578.7616 1413.0201 4706.2399
1.0000000 2979.3460 1489.6730 5958.6920
1.1625000 3324.2002 1537.2024 7188.5829
1.3250000 3646.8432 1568.5347 8478.9104
1.5000000 3972.8407 1589.1363 9932.1017
1.6625000 4258.1131 1599.2913 11337.226
1.8250000 4528.5346 1603.0211 12793.110
2.0000000 4804.9314 1601.6438 14414.794
2.1625000 5049.2007 1596.5852 15968.097
2.3250000 5282.6693 1588.7727 17564.875
2.5000000 5523.0914 1578.0261 19330.820
2.6625000 5736.9813 1566.4113 21011.694
2.8250000 5942.5803 1553.6158 22730.370
3.0000000 6155.4289 1538.8572 24621.716
3.1625000 6345.6997 1524.4924 26413.975
3.3250000 6529.3655 1509.6799 28239.506
3.5000000 6720.2676 1493.3928 30241.204
3.6625000 6891.5474 1478.0799 32131.840
3.8250000 7057.4213 1462.6780 34052.058
4.0000000 7230.3723 1446.0745 36151.862
4.1625000 7385.9998 1430.7021 38130.224
4.3250000 7537.1112 1415.4199 40135.117
4.5000000 7695.0718 1399.1040 42322.895
4.6625000 7837.5510 1384.1150 44380.133
"""
cosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 601.80160 517.67879 699.59436
0.32500000 1057.9502 798.45297 1401.7840
0.50000000 1438.2161 958.81076 2157.3242
0.66250000 1718.6778 1033.7912 2857.3019
0.82500000 1948.2400 1067.5288 3555.5381
1.0000000 2152.7954 1076.3977 4305.5908
1.1625000 2312.3427 1069.2914 5000.4410
1.3250000 2448.9755 1053.3228 5693.8681
1.5000000 2575.6795 1030.2718 6439.1988
1.6625000 2677.9671 1005.8092 7130.0873
1.8250000 2768.1157 979.86398 7819.9270
2.0000000 2853.9222 951.30739 8561.7665
2.1625000 2924.8116 924.84161 9249.7167
2.3250000 2988.5333 898.80701 9936.8732
2.5000000 3050.3065 871.51614 10676.073
2.6625000 3102.1909 847.01459 11361.774
2.8250000 3149.5043 823.39982 12046.854
3.0000000 3195.9966 798.99915 12783.986
3.1625000 3235.5334 777.30533 13467.908
3.3250000 3271.9832 756.52790 14151.327
3.5000000 3308.1758 735.15017 14886.791
3.6625000 3339.2521 716.19347 15569.263
3.8250000 3368.1489 698.06195 16251.319
4.0000000 3397.0803 679.41605 16985.401
4.1625000 3422.1142 662.87926 17666.664
4.3250000 3445.5542 647.05243 18347.576
4.5000000 3469.1805 630.76008 19080.493
4.6625000 3489.7534 616.29199 19760.729
"""
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_integral():
# Test integer vs. floating point inputs
cosmo = flrw.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)
assert allclose(cosmo.comoving_distance(3), cosmo.comoving_distance(3.0), rtol=1e-7)
assert allclose(
cosmo.comoving_distance([1, 2, 3, 5]),
cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),
rtol=1e-7,
)
assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
assert allclose(cosmo.efunc([1, 2, 6]), cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7)
assert allclose(
cosmo.inv_efunc([1, 2, 6]), cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_de_densityscale():
cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
assert allclose(cosmo.de_density_scale(z), [1.0, 1.0, 1.0, 1.0, 1.0])
# Integer check
assert allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(
cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1.0, 2.0, 3.0]),
rtol=1e-7,
)
cosmo = flrw.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)
assert allclose(
cosmo.de_density_scale(z),
[1.15369, 1.31453, 1.83712, 3.95285, 6.5479],
rtol=1e-4,
)
assert allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(
cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1.0, 2.0, 3.0]),
rtol=1e-7,
)
cosmo = flrw.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
assert allclose(
cosmo.de_density_scale(z),
[0.746048, 0.5635595, 0.25712378, 0.026664129, 0.0035916468],
rtol=1e-4,
)
assert allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(
cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1.0, 2.0, 3.0]),
rtol=1e-7,
)
cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
assert allclose(
cosmo.de_density_scale(z),
[0.9934201, 0.9767912, 0.897450, 0.622236, 0.4458753],
rtol=1e-4,
)
assert allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(
cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1.0, 2.0, 3.0]),
rtol=1e-7,
)
cosmo = flrw.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9, wa=0.2, zp=0.5)
assert allclose(
cosmo.de_density_scale(z),
[1.012246048, 1.0280102, 1.087439, 1.324988, 1.565746],
rtol=1e-4,
)
assert allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(
cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1.0, 2.0, 3.0]),
rtol=1e-7,
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_age():
# WMAP7 but with Omega_relativistic = 0
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
assert allclose(tcos.age([1.0, 5.0]), [5.97113193, 1.20553129] * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
# Add relativistic species
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)
# And massive neutrinos
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0, m_nu=0.1 * u.eV)
assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distmod():
# WMAP7 but with Omega_relativistic = 0
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
assert allclose(tcos.distmod([1, 5]), [44.124857, 48.40167258] * u.mag)
assert allclose(tcos.distmod([1.0, 5.0]), [44.124857, 48.40167258] * u.mag)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_neg_distmod():
# Cosmology with negative luminosity distances (perfectly okay,
# if obscure)
tcos = flrw.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
assert allclose(
tcos.luminosity_distance([50, 100]), [16612.44047622, -46890.79092244] * u.Mpc
)
assert allclose(tcos.distmod([50, 100]), [46.102167189, 48.355437790944] * u.mag)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_critical_density():
from astropy.constants import codata2014
# WMAP7 but with Omega_relativistic = 0
# These tests will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py.
# critical_density0 is inversely proportional to G.
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
fac = (const.G / codata2014.G).to(u.dimensionless_unscaled).value
assert allclose(
tcos.critical_density0 * fac, 9.309668456020899e-30 * (u.g / u.cm**3)
)
assert allclose(tcos.critical_density0, tcos.critical_density(0))
assert allclose(
tcos.critical_density([1, 5]) * fac,
[2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3),
)
assert allclose(
tcos.critical_density([1.0, 5.0]) * fac,
[2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3),
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_comoving_distance_z1z2():
tcos = flrw.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))
# Comoving distances are invertible
assert allclose(
tcos._comoving_distance_z1z2(1, 2), -tcos._comoving_distance_z1z2(2, 1)
)
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (
3767.90579253,
2386.25591391,
-1381.64987862,
2893.11776663,
174.1524683,
) * u.Mpc
assert allclose(tcos._comoving_distance_z1z2(z1, z2), results)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_age_in_special_cosmologies():
"""Check that age in de Sitter and Einstein-de Sitter Universes work.
Some analytic solutions fail at these critical points.
"""
c_dS = flrw.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.age(z=0), np.inf * u.Gyr)
assert allclose(c_dS.age(z=1), np.inf * u.Gyr)
assert allclose(c_dS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr)
c_EdS = flrw.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr)
assert allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distance_in_special_cosmologies():
"""Check that de Sitter and Einstein-de Sitter Universes both work.
Some analytic solutions fail at these critical points.
"""
c_dS = flrw.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = flrw.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
c_dS = flrw.LambdaCDM(100, 0, 1, Tcmb0=0)
assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = flrw.LambdaCDM(100, 1, 0, Tcmb0=0)
assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_comoving_transverse_distance_z1z2():
tcos = flrw.FlatLambdaCDM(100, 0.3, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))
# Tests that should actually work, target values computed with
# http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
# Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
assert allclose(
tcos._comoving_transverse_distance_z1z2(1, 2), 1313.2232194828466 * u.Mpc
)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(
tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2),
)
# Test Flat Universe with Omega_M > 1. Rarely used, but perfectly valid.
tcos = flrw.FlatLambdaCDM(100, 1.5, Tcmb0=0.0)
results = (
2202.72682564,
1559.51679971,
-643.21002593,
1408.36365679,
85.09286258,
) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(
tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2),
)
# Test non-flat cases to avoid simply testing
# comoving_distance_z1z2. Test array, array case.
tcos = flrw.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0)
results = (
3535.931375645655,
2226.430046551708,
-1208.6817970036532,
2595.567367601969,
151.36592003406884,
) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results)
# Test positive curvature with scalar, array combination.
tcos = flrw.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0)
z1 = 0.1
z2 = 0, 0.1, 0.2, 0.5, 1.1, 2
results = (
-281.31602666724865,
0.0,
248.58093707820436,
843.9331377460543,
1618.6104987686672,
2287.5626543279927,
) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_angular_diameter_distance_z1z2():
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5])
# Tests that should actually work
assert allclose(
tcos.angular_diameter_distance_z1z2(1, 2), 646.22968662822018 * u.Mpc
)
z1 = 2 # Separate test for z2<z1, returns negative value with warning
z2 = 1
results = -969.34452994 * u.Mpc
with pytest.warns(AstropyUserWarning, match="less than first redshift"):
assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results)
z1 = 0, 0, 0.5, 1
z2 = 2, 1, 2.5, 1.1
results = (
1760.0628637762106,
1670.7497657219858,
1159.0970895962193,
115.72768186186921,
) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results)
z1 = 0.1
z2 = 0.1, 0.2, 0.5, 1.1, 2
results = (0.0, 332.09893173, 986.35635069, 1508.37010062, 1621.07937976) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2), results)
# Non-flat (positive Ok0) test
tcos = flrw.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)
assert allclose(
tcos.angular_diameter_distance_z1z2(1, 2), 620.1175337852428 * u.Mpc
)
# Non-flat (negative Ok0) test
tcos = flrw.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0)
assert allclose(
tcos.angular_diameter_distance_z1z2(1, 2), 228.42914659246014 * u.Mpc
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_absorption_distance():
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.absorption_distance([1, 3]), [1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance([1.0, 3.0]), [1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance(3), 7.98685853)
assert allclose(tcos.absorption_distance(3.0), 7.98685853)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distances():
# Test distance calculations for various special case
# scenarios (no relativistic species, normal, massive neutrinos)
# These do not come from external codes -- they are just internal
# checks to make sure nothing changes if we muck with the distance
# calculators
z = np.array([1.0, 2.0, 3.0, 4.0])
# The pattern here is: no relativistic species, the relativistic
# species with massless neutrinos, then massive neutrinos
cos = flrw.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0)
assert allclose(
cos.comoving_distance(z),
[2953.93001902, 4616.7134253, 5685.07765971, 6440.80611897] * u.Mpc,
rtol=1e-4,
)
cos = flrw.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(
cos.comoving_distance(z),
[3037.12620424, 4776.86236327, 5889.55164479, 6671.85418235] * u.Mpc,
rtol=1e-4,
)
cos = flrw.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV))
assert allclose(
cos.comoving_distance(z),
[2471.80626824, 3567.1902565, 4207.15995626, 4638.20476018] * u.Mpc,
rtol=1e-4,
)
# Flat
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0)
assert allclose(
cos.comoving_distance(z),
[3180.83488552, 5060.82054204, 6253.6721173, 7083.5374303] * u.Mpc,
rtol=1e-4,
)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(
cos.comoving_distance(z),
[3180.42662867, 5059.60529655, 6251.62766102, 7080.71698117] * u.Mpc,
rtol=1e-4,
)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV))
assert allclose(
cos.comoving_distance(z),
[2337.54183142, 3371.91131264, 3988.40711188, 4409.09346922] * u.Mpc,
rtol=1e-4,
)
# Add w
cos = flrw.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0)
assert allclose(
cos.comoving_distance(z),
[3216.8296894, 5117.2097601, 6317.05995437, 7149.68648536] * u.Mpc,
rtol=1e-4,
)
cos = flrw.FlatwCDM(
75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)
)
assert allclose(
cos.comoving_distance(z),
[3143.56537758, 5000.32196494, 6184.11444601, 7009.80166062] * u.Mpc,
rtol=1e-4,
)
cos = flrw.FlatwCDM(
75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV)
)
assert allclose(
cos.comoving_distance(z),
[2337.76035371, 3372.1971387, 3988.71362289, 4409.40817174] * u.Mpc,
rtol=1e-4,
)
# Non-flat w
cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0)
assert allclose(
cos.comoving_distance(z),
[2849.6163356, 4428.71661565, 5450.97862778, 6179.37072324] * u.Mpc,
rtol=1e-4,
)
cos = flrw.wCDM(
75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)
)
assert allclose(
cos.comoving_distance(z),
[2904.35580229, 4511.11471267, 5543.43643353, 6275.9206788] * u.Mpc,
rtol=1e-4,
)
cos = flrw.wCDM(
75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV)
)
assert allclose(
cos.comoving_distance(z),
[2473.32522734, 3581.54519631, 4232.41674426, 4671.83818117] * u.Mpc,
rtol=1e-4,
)
# w0wa
cos = flrw.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0)
assert allclose(
cos.comoving_distance(z),
[2937.7807638, 4572.59950903, 5611.52821924, 6339.8549956] * u.Mpc,
rtol=1e-4,
)
cos = flrw.w0waCDM(
75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)
)
assert allclose(
cos.comoving_distance(z),
[2907.34722624, 4539.01723198, 5593.51611281, 6342.3228444] * u.Mpc,
rtol=1e-4,
)
cos = flrw.w0waCDM(
75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV)
)
assert allclose(
cos.comoving_distance(z),
[2507.18336722, 3633.33231695, 4292.44746919, 4736.35404638] * u.Mpc,
rtol=1e-4,
)
# Flatw0wa
cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0)
assert allclose(
cos.comoving_distance(z),
[3123.29892781, 4956.15204302, 6128.15563818, 6948.26480378] * u.Mpc,
rtol=1e-4,
)
cos = flrw.Flatw0waCDM(
75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)
)
assert allclose(
cos.comoving_distance(z),
[3122.92671907, 4955.03768936, 6126.25719576, 6945.61856513] * u.Mpc,
rtol=1e-4,
)
cos = flrw.Flatw0waCDM(
75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV)
)
assert allclose(
cos.comoving_distance(z),
[2337.70072701, 3372.13719963, 3988.6571093, 4409.35399673] * u.Mpc,
rtol=1e-4,
)
# wpwa
cos = flrw.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0)
assert allclose(
cos.comoving_distance(z),
[2954.68975298, 4599.83254834, 5643.04013201, 6373.36147627] * u.Mpc,
rtol=1e-4,
)
cos = flrw.wpwaCDM(
75.0,
0.25,
0.5,
wp=-0.9,
zp=0.4,
wa=0.1,
Tcmb0=3.0,
Neff=3,
m_nu=u.Quantity(0.0, u.eV),
)
assert allclose(
cos.comoving_distance(z),
[2919.00656215, 4558.0218123, 5615.73412391, 6366.10224229] * u.Mpc,
rtol=1e-4,
)
cos = flrw.wpwaCDM(
75.0,
0.25,
0.5,
wp=-0.9,
zp=1.0,
wa=0.1,
Tcmb0=3.0,
Neff=4,
m_nu=u.Quantity(5.0, u.eV),
)
assert allclose(
cos.comoving_distance(z),
[2629.48489827, 3874.13392319, 4614.31562397, 5116.51184842] * u.Mpc,
rtol=1e-4,
)
# w0wz
cos = flrw.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0)
assert allclose(
cos.comoving_distance(z),
[3051.68786716, 4756.17714818, 5822.38084257, 6562.70873734] * u.Mpc,
rtol=1e-4,
)
cos = flrw.w0wzCDM(
75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)
)
assert allclose(
cos.comoving_distance(z),
[2997.8115653, 4686.45599916, 5764.54388557, 6524.17408738] * u.Mpc,
rtol=1e-4,
)
cos = flrw.w0wzCDM(
75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0, Neff=4, m_nu=u.Quantity(5.0, u.eV)
)
assert allclose(
cos.comoving_distance(z),
[2676.73467639, 3940.57967585, 4686.90810278, 5191.54178243] * u.Mpc,
rtol=1e-4,
)
# Also test different numbers of massive neutrinos
# for FlatLambdaCDM to give the scalar nu density functions a
    # workout
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, m_nu=u.Quantity([10.0, 0, 0], u.eV))
assert allclose(
cos.comoving_distance(z),
[2777.71589173, 4186.91111666, 5046.0300719, 5636.10397302] * u.Mpc,
rtol=1e-4,
)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, m_nu=u.Quantity([10.0, 5, 0], u.eV))
assert allclose(
cos.comoving_distance(z),
[2636.48149391, 3913.14102091, 4684.59108974, 5213.07557084] * u.Mpc,
rtol=1e-4,
)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, m_nu=u.Quantity([4.0, 5, 9], u.eV))
assert allclose(
cos.comoving_distance(z),
[2563.5093049, 3776.63362071, 4506.83448243, 5006.50158829] * u.Mpc,
rtol=1e-4,
)
cos = flrw.FlatLambdaCDM(
75.0, 0.25, Tcmb0=3.0, Neff=4.2, m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV)
)
assert allclose(
cos.comoving_distance(z),
[2525.58017482, 3706.87633298, 4416.58398847, 4901.96669755] * u.Mpc,
rtol=1e-4,
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_massivenu_density():
# Testing neutrino density calculation
# Simple test cosmology, where we compare rho_nu and rho_gamma
# against the exact formula (eq 24/25 of Komatsu et al. 2011)
# computed using Mathematica. The approximation we use for f(y)
# is only good to ~ 0.5% (with some redshift dependence), so that's
# what we test to.
ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
# First try 3 massive neutrinos, all 100 eV -- note this is a universe
# seriously dominated by neutrinos!
tcos = flrw.FlatLambdaCDM(
75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(100.0, u.eV)
)
assert tcos.has_massive_nu
assert tcos.Neff == 3
nurel_exp = (
nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323, 15633.5, 171.801])
)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
# Next, slightly less massive
tcos = flrw.FlatLambdaCDM(
75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.25, u.eV)
)
nurel_exp = (
nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312, 39.1005, 1.11086])
)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
# For this one also test Onu directly
onu_exp = np.array([0.01890217, 0.05244681, 0.0638236, 0.06999286, 0.1344951])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# And fairly light
tcos = flrw.FlatLambdaCDM(
80.0, 0.30, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.01, u.eV)
)
nurel_exp = (
nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348, 1.90671, 1.00021])
)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
onu_exp = np.array([0.00066599, 0.00172677, 0.0020732, 0.00268404, 0.0978313])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048], rtol=1e-4)
assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534], rtol=1e-4)
# Now a mixture of neutrino masses, with non-integer Neff
tcos = flrw.FlatLambdaCDM(
80.0, 0.30, Tcmb0=3.0, Neff=3.04, m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV)
)
nurel_exp = (
nuprefac
* tcos.Neff
* np.array([149.386233, 74.87915, 50.0518, 14.002403, 1.03702333])
)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
onu_exp = np.array([0.00584959, 0.01493142, 0.01772291, 0.01963451, 0.10227728])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# Integer redshifts
ztest = ztest.astype(int)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_elliptic_comoving_distance_z1z2():
"""Regression test for #8388."""
cosmo = flrw.LambdaCDM(70.0, 2.3, 0.05, Tcmb0=0)
z = 0.2
assert allclose(
cosmo.comoving_distance(z), cosmo._integral_comoving_distance_z1z2(0.0, z)
)
assert allclose(
cosmo._elliptic_comoving_distance_z1z2(0.0, z),
cosmo._integral_comoving_distance_z1z2(0.0, z),
)
SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES = [
flrw.FlatLambdaCDM(H0=70, Om0=0.0, Tcmb0=0.0), # de Sitter
flrw.FlatLambdaCDM(H0=70, Om0=1.0, Tcmb0=0.0), # Einstein - de Sitter
flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0.0), # Hypergeometric
flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.6, Tcmb0=0.0), # Elliptic
]
ITERABLE_REDSHIFTS = [
(0, 1, 2, 3, 4), # tuple
[0, 1, 2, 3, 4], # list
np.array([0, 1, 2, 3, 4]), # array
]
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize("cosmo", SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES)
@pytest.mark.parametrize("z", ITERABLE_REDSHIFTS)
def test_comoving_distance_iterable_argument(cosmo, z):
"""
Regression test for #10980
Test that specialized comoving distance methods handle iterable arguments.
"""
assert allclose(
cosmo.comoving_distance(z), cosmo._integral_comoving_distance_z1z2(0.0, z)
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize("cosmo", SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES)
def test_comoving_distance_broadcast(cosmo):
"""
Regression test for #10980
Test that specialized comoving distance methods broadcast array arguments.
"""
z1 = np.zeros((2, 5))
z2 = np.ones((3, 1, 5))
z3 = np.ones((7, 5))
output_shape = np.broadcast(z1, z2).shape
# Check compatible array arguments return an array with the correct shape
assert cosmo._comoving_distance_z1z2(z1, z2).shape == output_shape
# Check incompatible array arguments raise an error
with pytest.raises(ValueError, match="z1 and z2 have different shapes"):
cosmo._comoving_distance_z1z2(z1, z3)
|
f38455243fd63639e15e1ba8f02c55656210dc94b2461e8b69f5880a2d347c54 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Astropy FLRW classes."""
from . import base, lambdacdm, w0cdm, w0wacdm, w0wzcdm, wpwazpcdm
from .base import *
from .lambdacdm import *
from .w0cdm import *
from .w0wacdm import *
from .w0wzcdm import *
from .wpwazpcdm import *
__all__ = (
base.__all__
+ lambdacdm.__all__
+ w0cdm.__all__
+ w0wacdm.__all__
+ wpwazpcdm.__all__
+ w0wzcdm.__all__
)
def __getattr__(attr):
"""Lazy import deprecated private API."""
base_attrs = (
"H0units_to_invs",
"a_B_c2",
"critdens_const",
"kB_evK",
"radian_in_arcmin",
"radian_in_arcsec",
"sec_to_Gyr",
)
if attr in base_attrs + ("quad",) + ("ellipkinc", "hyp2f1"):
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
from . import base, lambdacdm
msg = (
f"`astropy.cosmology.flrw.{attr}` is a private variable (since "
"v5.1) and in future will raise an exception."
)
warnings.warn(msg, AstropyDeprecationWarning)
if attr in base_attrs:
return getattr(base, "_" + attr)
elif attr == "quad":
return getattr(base, attr)
elif attr in ("ellipkinc", "hyp2f1"):
return getattr(lambdacdm, attr)
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
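

# For example, accessing the deprecated name ``astropy.cosmology.flrw.quad``
# goes through the ``__getattr__`` hook above: it emits an
# AstropyDeprecationWarning and then returns ``base.quad`` (``ellipkinc`` and
# ``hyp2f1`` resolve from ``lambdacdm``; the remaining names map to their
# underscore-prefixed counterparts in ``base``).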
|
1b47aea94495d3f5ad42d0f7118400bfef54074be7442d7a21d25e58c2484e1c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy import exp
import astropy.units as u
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.utils import aszarr
from . import scalar_inv_efuncs
from .base import FLRW
__all__ = ["w0wzCDM"]
__doctest_requires__ = {"*": ["scipy"]}
class w0wzCDM(FLRW):
"""
FLRW cosmology with a variable dark energy equation of state and curvature.
The equation for the dark energy equation of state uses the simple form:
:math:`w(z) = w_0 + w_z z`.
This form is not recommended for z > 1.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at z=0. This is pressure/density for
dark energy in units where c=1.
wz : float, optional
Derivative of the dark energy equation of state with respect to z.
A cosmological constant has w0=-1.0 and wz=0.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import w0wzCDM
>>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
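
    As an illustration, the equation of state at this redshift follows the
    linear form ``w0 + wz * z`` (here ``-0.9 + 0.2 * 0.5 = -0.8``):

    >>> w_half = cosmo.w(z)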
"""
w0 = Parameter(doc="Dark energy equation of state at z=0.", fvalidate="float")
wz = Parameter(
doc="Derivative of the dark energy equation of state w.r.t. z.",
fvalidate="float",
)
def __init__(
self,
H0,
Om0,
Ode0,
w0=-1.0,
wz=0.0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=Ode0,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
self.w0 = w0
self.wz = wz
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._w0,
self._wz,
)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0,
self._wz,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
self._w0,
self._wz,
)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is given by :math:`w(z) = w_0 + w_z z`.
"""
return self._w0 + self._wz * aszarr(z)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
I = \left(1 + z\right)^{3 \left(1 + w_0 - w_z\right)}
\exp \left(-3 w_z z\right)
"""
z = aszarr(z)
zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless])
return zp1 ** (3.0 * (1.0 + self._w0 - self._wz)) * exp(-3.0 * self._wz * z)
|
dd7c06d8f21fa6bc3c2c970cd166a75c44328b0f68e9145870de1444a9027578 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import annotations
import warnings
from abc import abstractmethod
from math import exp, floor, log, pi, sqrt
from numbers import Number
from typing import Any, Mapping, TypeVar
import numpy as np
from numpy import inf, sin
import astropy.constants as const
import astropy.units as u
from astropy.cosmology.core import Cosmology, FlatCosmologyMixin
from astropy.cosmology.parameter import (
Parameter,
_validate_non_negative,
_validate_with_unit,
)
from astropy.cosmology.utils import aszarr, vectorize_redshift_method
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
# isort: split
if HAS_SCIPY:
from scipy.integrate import quad
else:
def quad(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.integrate'")
__all__ = ["FLRW", "FlatFLRWMixin"]
__doctest_requires__ = {"*": ["scipy"]}
##############################################################################
# Parameters
# Some conversion constants -- useful to compute them once here and reuse in
# the initialization rather than have every object do them.
_H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
_sec_to_Gyr = u.s.to(u.Gyr)
# const in critical density in cgs units (g cm^-3)
_critdens_const = (3 / (8 * pi * const.G)).cgs.value
# angle conversions
_radian_in_arcsec = (1 * u.rad).to(u.arcsec)
_radian_in_arcmin = (1 * u.rad).to(u.arcmin)
# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
_a_B_c2 = (4 * const.sigma_sb / const.c**3).cgs.value
# Boltzmann constant in eV / K
_kB_evK = const.k_B.to(u.eV / u.K)
# typing
_FLRWT = TypeVar("_FLRWT", bound="FLRW")
_FlatFLRWMixinT = TypeVar("_FlatFLRWMixinT", bound="FlatFLRWMixin")
##############################################################################
class FLRW(Cosmology):
"""
A class describing an isotropic and homogeneous
(Friedmann-Lemaitre-Robertson-Walker) cosmology.
    This is an abstract base class -- you cannot instantiate this class
    directly; instead, work with one of its subclasses, such as
:class:`~astropy.cosmology.LambdaCDM` or :class:`~astropy.cosmology.wCDM`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0. Note that this does not include massive
neutrinos.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Notes
-----
Class instances are immutable -- you cannot change the parameters' values.
That is, all of the above attributes (except meta) are read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
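
    Examples
    --------
    ``FLRW`` itself cannot be instantiated; a minimal sketch using one of its
    concrete subclasses (the parameter values here are illustrative only):

    >>> from astropy.cosmology import FlatLambdaCDM
    >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    >>> dc = cosmo.comoving_distance(0.5)
    >>> t0 = cosmo.age(0)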
"""
H0 = Parameter(
doc="Hubble constant as an `~astropy.units.Quantity` at z=0.",
unit="km/(s Mpc)",
fvalidate="scalar",
)
Om0 = Parameter(
doc="Omega matter; matter density/critical density at z=0.",
fvalidate="non-negative",
)
Ode0 = Parameter(
doc="Omega dark energy; dark energy density/critical density at z=0.",
fvalidate="float",
)
Tcmb0 = Parameter(
doc="Temperature of the CMB as `~astropy.units.Quantity` at z=0.",
unit="Kelvin",
fvalidate="scalar",
)
Neff = Parameter(
doc="Number of effective neutrino species.", fvalidate="non-negative"
)
m_nu = Parameter(
doc="Mass of neutrino species.", unit="eV", equivalencies=u.mass_energy()
)
Ob0 = Parameter(
doc="Omega baryon; baryonic matter density/critical density at z=0."
)
def __init__(
self,
H0,
Om0,
Ode0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None,
):
super().__init__(name=name, meta=meta)
# Assign (and validate) Parameters
self.H0 = H0
self.Om0 = Om0
self.Ode0 = Ode0
self.Tcmb0 = Tcmb0
self.Neff = Neff
self.m_nu = m_nu # (reset later, this is just for unit validation)
self.Ob0 = Ob0 # (must be after Om0)
# Derived quantities:
# Dark matter density; matter - baryons, if latter is not None.
self._Odm0 = None if Ob0 is None else (self._Om0 - self._Ob0)
# 100 km/s/Mpc * h = H0 (so h is dimensionless)
self._h = self._H0.value / 100.0
# Hubble distance
self._hubble_distance = (const.c / self._H0).to(u.Mpc)
# H0 in s^-1
H0_s = self._H0.value * _H0units_to_invs
# Hubble time
self._hubble_time = (_sec_to_Gyr / H0_s) << u.Gyr
# Critical density at z=0 (grams per cubic cm)
cd0value = _critdens_const * H0_s**2
self._critical_density0 = cd0value << u.g / u.cm**3
# Compute photon density from Tcmb
self._Ogamma0 = _a_B_c2 * self._Tcmb0.value**4 / self._critical_density0.value
# Compute Neutrino temperature:
# The constant in front is (4/11)^1/3 -- see any cosmology book for an
# explanation -- for example, Weinberg 'Cosmology' p 154 eq (3.1.21).
self._Tnu0 = 0.7137658555036082 * self._Tcmb0
# Compute neutrino parameters:
if self._m_nu is None:
self._nneutrinos = 0
self._neff_per_nu = None
self._massivenu = False
self._massivenu_mass = None
self._nmassivenu = self._nmasslessnu = None
else:
self._nneutrinos = floor(self._Neff)
# We are going to share Neff between the neutrinos equally. In
# detail this is not correct, but it is a standard assumption
# because properly calculating it is a) complicated b) depends on
# the details of the massive neutrinos (e.g., their weak
# interactions, which could be unusual if one is considering
# sterile neutrinos).
self._neff_per_nu = self._Neff / self._nneutrinos
# Now figure out if we have massive neutrinos to deal with, and if
# so, get the right number of masses. It is worth keeping track of
# massless ones separately (since they are easy to deal with, and a
# common use case is to have only one massive neutrino).
massive = np.nonzero(self._m_nu.value > 0)[0]
self._massivenu = massive.size > 0
self._nmassivenu = len(massive)
self._massivenu_mass = (
self._m_nu[massive].value if self._massivenu else None
)
self._nmasslessnu = self._nneutrinos - self._nmassivenu
# Compute Neutrino Omega and total relativistic component for massive
# neutrinos. We also store a list version, since that is more efficient
# to do integrals with (perhaps surprisingly! But small python lists
# are more efficient than small NumPy arrays).
if self._massivenu: # (`_massivenu` set in `m_nu`)
nu_y = self._massivenu_mass / (_kB_evK * self._Tnu0)
self._nu_y = nu_y.value
self._nu_y_list = self._nu_y.tolist()
self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
else:
            # This case is particularly simple, so do it directly. The 0.2271...
# is 7/8 (4/11)^(4/3) -- the temperature bit ^4 (blackbody energy
# density) times 7/8 for FD vs. BE statistics.
self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
self._nu_y = self._nu_y_list = None
# Compute curvature density
self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0
# Subclasses should override this reference if they provide
# more efficient scalar versions of inv_efunc.
self._inv_efunc_scalar = self.inv_efunc
self._inv_efunc_scalar_args = ()
# ---------------------------------------------------------------
# Parameter details
@Ob0.validator
def Ob0(self, param, value):
"""Validate baryon density to None or positive float > matter density."""
if value is None:
return value
value = _validate_non_negative(self, param, value)
if value > self.Om0:
raise ValueError(
"baryonic density can not be larger than total matter density."
)
return value
@m_nu.validator
def m_nu(self, param, value):
"""Validate neutrino masses to right value, units, and shape.
There are no neutrinos if floor(Neff) or Tcmb0 are 0.
The number of neutrinos must match floor(Neff).
Neutrino masses cannot be negative.
"""
# Check if there are any neutrinos
if (nneutrinos := floor(self._Neff)) == 0 or self._Tcmb0.value == 0:
return None # None, regardless of input
# Validate / set units
value = _validate_with_unit(self, param, value)
# Check values and data shapes
if value.shape not in ((), (nneutrinos,)):
raise ValueError(
"unexpected number of neutrino masses — "
f"expected {nneutrinos}, got {len(value)}."
)
elif np.any(value.value < 0):
raise ValueError("invalid (negative) neutrino mass encountered.")
# scalar -> array
if value.isscalar:
value = np.full_like(value, value, shape=nneutrinos)
return value
# ---------------------------------------------------------------
# properties
@property
def is_flat(self):
"""Return bool; `True` if the cosmology is flat."""
return bool((self._Ok0 == 0.0) and (self.Otot0 == 1.0))
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return self._Om0 + self._Ogamma0 + self._Onu0 + self._Ode0 + self._Ok0
@property
def Odm0(self):
"""Omega dark matter; dark matter density/critical density at z=0."""
return self._Odm0
@property
def Ok0(self):
"""Omega curvature; the effective curvature density/critical density at z=0."""
return self._Ok0
@property
def Tnu0(self):
"""
Temperature of the neutrino background as `~astropy.units.Quantity` at z=0.
"""
return self._Tnu0
@property
def has_massive_nu(self):
"""Does this cosmology have at least one massive neutrino species?"""
if self._Tnu0.value == 0:
return False
return self._massivenu
@property
def h(self):
"""Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]."""
return self._h
@property
def hubble_time(self):
"""Hubble time as `~astropy.units.Quantity`."""
return self._hubble_time
@property
def hubble_distance(self):
"""Hubble distance as `~astropy.units.Quantity`."""
return self._hubble_distance
@property
def critical_density0(self):
"""Critical density as `~astropy.units.Quantity` at z=0."""
return self._critical_density0
@property
def Ogamma0(self):
"""Omega gamma; the density/critical density of photons at z=0."""
return self._Ogamma0
@property
def Onu0(self):
"""Omega nu; the density/critical density of neutrinos at z=0."""
return self._Onu0
# ---------------------------------------------------------------
@abstractmethod
def w(self, z):
r"""The dark energy equation of state.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
`float` if scalar input.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1.
This must be overridden by subclasses.
"""
raise NotImplementedError("w(z) is not implemented")
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
The total density relative to the critical density at each redshift.
Returns float if input scalar.
"""
return self.Om(z) + self.Ogamma(z) + self.Onu(z) + self.Ode(z) + self.Ok(z)
def Om(self, z):
"""
Return the density parameter for non-relativistic matter
at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Om : ndarray or float
The density of non-relativistic matter relative to the critical
density at each redshift.
Returns `float` if the input is scalar.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest; see `Onu`.
"""
z = aszarr(z)
return self._Om0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ob(self, z):
"""Return the density parameter for baryonic matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ob : ndarray or float
The density of baryonic matter relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
"""
if self._Ob0 is None:
raise ValueError("Baryon density not set for this cosmology")
z = aszarr(z)
return self._Ob0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Odm(self, z):
"""Return the density parameter for dark matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Odm : ndarray or float
The density of non-relativistic dark matter relative to the
critical density at each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest.
"""
if self._Odm0 is None:
raise ValueError(
"Baryonic density not set for this cosmology, "
"unclear meaning of dark matter density"
)
z = aszarr(z)
return self._Odm0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ok(self, z):
"""
Return the equivalent density parameter for curvature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ok : ndarray or float
The equivalent density parameter for curvature at each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ok0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ok0 * (z + 1.0) ** 2 * self.inv_efunc(z) ** 2
def Ode(self, z):
"""Return the density parameter for dark energy at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ode : ndarray or float
            The density of dark energy relative to the critical density at
            each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ode0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
def Ogamma(self, z):
"""Return the density parameter for photons at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ogamma : ndarray or float
The energy density of photons relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
return self._Ogamma0 * (z + 1.0) ** 4 * self.inv_efunc(z) ** 2
def Onu(self, z):
r"""Return the density parameter for neutrinos at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Onu : ndarray or float
The energy density of neutrinos relative to the critical density at
each redshift. Note that this includes their kinetic energy (if
they have mass), so it is not equal to the commonly used
:math:`\sum \frac{m_{\nu}}{94 eV}`, which does not include
kinetic energy.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Onu0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self.Ogamma(z) * self.nu_relative_density(z)
def Tcmb(self, z):
"""Return the CMB temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tcmb : `~astropy.units.Quantity` ['temperature']
The temperature of the CMB in K.
"""
return self._Tcmb0 * (aszarr(z) + 1.0)
def Tnu(self, z):
"""Return the neutrino temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tnu : `~astropy.units.Quantity` ['temperature']
The temperature of the cosmic neutrino background in K.
"""
return self._Tnu0 * (aszarr(z) + 1.0)
def nu_relative_density(self, z):
r"""Neutrino density function relative to the energy density in photons.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
f : ndarray or float
The neutrino density scaling factor relative to the density in
photons at each redshift.
Only returns `float` if z is scalar.
Notes
-----
The density in neutrinos is given by
.. math::
\rho_{\nu} \left(a\right) = 0.2271 \, N_{eff} \,
f\left(m_{\nu} a / T_{\nu 0} \right) \,
\rho_{\gamma} \left( a \right)
where
.. math::
f \left(y\right) = \frac{120}{7 \pi^4}
\int_0^{\infty} \, dx \frac{x^2 \sqrt{x^2 + y^2}}
{e^x + 1}
assuming that all neutrino species have the same mass.
If they have different masses, a similar term is calculated for each
one. Note that ``f`` has the asymptotic behavior :math:`f(0) = 1`. This
method returns :math:`0.2271 f` using an analytical fitting formula
given in Komatsu et al. 2011, ApJS 192, 18.
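
        Examples
        --------
        A minimal sketch on a cosmology with massive neutrinos (the parameter
        values are illustrative only):

        >>> from astropy.cosmology import FlatLambdaCDM
        >>> import astropy.units as u
        >>> cosmo = FlatLambdaCDM(70, 0.3, Tcmb0=2.725, m_nu=0.06 * u.eV)
        >>> f0 = cosmo.nu_relative_density(0)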
"""
# Note that there is also a scalar-z-only cython implementation of
# this in scalar_inv_efuncs.pyx, so if you find a problem in this
# you need to update there too.
# See Komatsu et al. 2011, eq 26 and the surrounding discussion
# for an explanation of what we are doing here.
# However, this is modified to handle multiple neutrino masses
# by computing the above for each mass, then summing
        prefac = 0.22710731766  # 7/8 (4/11)^(4/3) -- see any cosmo book
# The massive and massless contribution must be handled separately
# But check for common cases first
z = aszarr(z)
if not self._massivenu:
return (
prefac * self._Neff * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
)
# These are purely fitting constants -- see the Komatsu paper
p = 1.83
invp = 0.54644808743 # 1.0 / p
k = 0.3173
curr_nu_y = self._nu_y / (1.0 + np.expand_dims(z, axis=-1))
rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp
rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu
return prefac * self._neff_per_nu * rel_mass
def _w_integrand(self, ln1pz):
"""Internal convenience function for w(z) integral (eq. 5 of [1]_).
Parameters
----------
ln1pz : `~numbers.Number` or scalar ndarray
Assumes scalar input, since this should only be called inside an
integral.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
return 1.0 + self.w(exp(ln1pz) - 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and is given by
.. math::
I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} }
\left[ 1 + w\left( a^{\prime} \right) \right] \right)
The actual integral used is rewritten from [1]_ to be in terms of z.
        It is generally helpful for subclasses to overload this method if
the integral can be done analytically for the particular dark
energy equation of state that they implement.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
# This allows for an arbitrary w(z) following eq (5) of
# Linder 2003, PRL 90, 91301. The code here evaluates
# the integral numerically. However, most popular
# forms of w(z) are designed to make this integral analytic,
# so it is probably a good idea for subclasses to overload this
# method if an analytic form is available.
z = aszarr(z)
if not isinstance(z, (Number, np.generic)): # array/Quantity
ival = np.array(
[quad(self._w_integrand, 0, log(1 + redshift))[0] for redshift in z]
)
return np.exp(3 * ival)
else: # scalar
ival = quad(self._w_integrand, 0, log(z + 1.0))[0]
return exp(3 * ival)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
Notes
-----
It is not necessary to override this method, but if de_density_scale
takes a particularly simple form, it may be advantageous to.
"""
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(
zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0)
+ self._Ode0 * self.de_density_scale(z)
)
def inv_efunc(self, z):
"""Inverse of ``efunc``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the inverse Hubble constant.
Returns `float` if the input is scalar.
"""
# Avoid the function overhead by repeating code
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (
zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0)
+ self._Ode0 * self.de_density_scale(z)
) ** (-0.5)
def _lookback_time_integrand_scalar(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : float
Input redshift.
Returns
-------
I : float
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
return self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args) / (z + 1.0)
def lookback_time_integrand(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : float or array
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return self.inv_efunc(z) / (z + 1.0)
def _abs_distance_integrand_scalar(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
        z : float
Input redshift.
Returns
-------
X : float
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
args = self._inv_efunc_scalar_args
return (z + 1.0) ** 2 * self._inv_efunc_scalar(z, *args)
def abs_distance_integrand(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float or array
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return (z + 1.0) ** 2 * self.inv_efunc(z)
def H(self, z):
"""Hubble parameter (km/s/Mpc) at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
H : `~astropy.units.Quantity` ['frequency']
Hubble parameter at each input redshift.
"""
return self._H0 * self.efunc(z)
def scale_factor(self, z):
"""Scale factor at redshift ``z``.
The scale factor is defined as :math:`a = 1 / (1 + z)`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
a : ndarray or float
Scale factor at each input redshift.
Returns `float` if the input is scalar.
"""
return 1.0 / (aszarr(z) + 1.0)
def lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a lookback time.
"""
return self._lookback_time(z)
def _lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * self._integral_lookback_time(z)
@vectorize_redshift_method
def _integral_lookback_time(self, z, /):
"""Lookback time to redshift ``z``. Value in units of Hubble time.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
Lookback time to each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._lookback_time_integrand_scalar, 0, z)[0]
def lookback_distance(self, z):
"""
The lookback distance is the light travel time distance to a given
redshift. It is simply c * lookback_time. It may be used to calculate
the proper distance between two redshifts, e.g. for the mean free path
to ionizing radiation.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Lookback distance in Mpc
"""
return (self.lookback_time(z) * const.c).to(u.Mpc)
def age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return self._age(z)
def _age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
This internal function exists to be re-defined for optimizations.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
return self._hubble_time * self._integral_age(z)
@vectorize_redshift_method
def _integral_age(self, z, /):
"""Age of the universe at redshift ``z``. Value in units of Hubble time.
Calculated using explicit integration.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
The age of the universe at each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return quad(self._lookback_time_integrand_scalar, z, inf)[0]
def critical_density(self, z):
"""Critical density in grams per cubic cm at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
rho : `~astropy.units.Quantity`
Critical density in g/cm^3 at each input redshift.
"""
return self._critical_density0 * (self.efunc(z)) ** 2
def comoving_distance(self, z):
"""Comoving line-of-sight distance in Mpc at a given redshift.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc to each input redshift.
"""
return self._comoving_distance_z1z2(0, z)
def _comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._integral_comoving_distance_z1z2(z1, z2)
@vectorize_redshift_method(nin=2)
def _integral_comoving_distance_z1z2_scalar(self, z1, z2, /):
"""
Comoving line-of-sight distance between objects at redshifts ``z1`` and
``z2``. Value in Mpc.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : float or ndarray
Comoving distance in Mpc between each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._inv_efunc_scalar, z1, z2, args=self._inv_efunc_scalar_args)[0]
def _integral_comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``. The comoving distance along the line-of-sight
between two objects remains constant with time for objects in the
Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'] or array-like
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2) # fmt: skip
def comoving_transverse_distance(self, z):
r"""Comoving transverse distance in Mpc at a given redshift.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is the same as
the comoving distance if :math:`\Omega_k` is zero (as in the current
concordance Lambda-CDM model).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc at each input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
return self._comoving_transverse_distance_z1z2(0, z)
def _comoving_transverse_distance_z1z2(self, z1, z2):
r"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z2`` as
seen from redshift ``z1`` corresponding to an angular separation of
1 radian. This is the same as the comoving distance if :math:`\Omega_k`
is zero (as in the current concordance Lambda-CDM model).
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
            Comoving transverse distance in Mpc between the input redshifts.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
Ok0 = self._Ok0
dc = self._comoving_distance_z1z2(z1, z2)
if Ok0 == 0:
return dc
sqrtOk0 = sqrt(abs(Ok0))
dh = self._hubble_distance
if Ok0 > 0:
return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
else:
return dh / sqrtOk0 * sin(sqrtOk0 * dc.value / dh.value)
def angular_diameter_distance(self, z):
"""Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object
at redshift ``z`` ([1]_, [2]_, [3]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Angular diameter distance in Mpc at each input redshift.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 421-424.
.. [2] Weedman, D. (1986). Quasar astronomy, pp 65-67.
.. [3] Peebles, P. (1993). Principles of Physical Cosmology, pp 325-327.
"""
z = aszarr(z)
return self.comoving_transverse_distance(z) / (z + 1.0)
def luminosity_distance(self, z):
"""Luminosity distance in Mpc at redshift ``z``.
This is the distance to use when converting between the bolometric flux
from an object at redshift ``z`` and its bolometric luminosity [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Luminosity distance in Mpc at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a luminosity distance.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
"""
z = aszarr(z)
return (z + 1.0) * self.comoving_transverse_distance(z)
def angular_diameter_distance_z1z2(self, z1, z2):
"""Angular diameter distance between objects at 2 redshifts.
Useful for gravitational lensing, for example computing the angular
diameter distance between a lensed galaxy and the foreground lens.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. For most practical applications such as
gravitational lensing, ``z2`` should be larger than ``z1``. The
method will work for ``z2 < z1``; however, this will return
negative distances.
Returns
-------
d : `~astropy.units.Quantity`
The angular diameter distance between each input redshift pair.
            Returns scalar if input is scalar, array otherwise.
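        Examples
        --------
        Illustrative lensing-style usage (the redshifts are arbitrary), with
        the lens at ``z1`` and the source at ``z2``:

        >>> from astropy.cosmology import Planck18
        >>> d_ls = Planck18.angular_diameter_distance_z1z2(0.5, 2.0)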
"""
z1, z2 = aszarr(z1), aszarr(z2)
if np.any(z2 < z1):
warnings.warn(
f"Second redshift(s) z2 ({z2}) is less than first "
f"redshift(s) z1 ({z1}).",
AstropyUserWarning,
)
return self._comoving_transverse_distance_z1z2(z1, z2) / (z2 + 1.0)
@vectorize_redshift_method
def absorption_distance(self, z, /):
"""Absorption distance at redshift ``z``.
This is used to calculate the number of objects with some cross section
of absorption and number density intersecting a sightline per unit
redshift path ([1]_, [2]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : float or ndarray
Absorption distance (dimensionless) at each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
.. [2] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
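        Examples
        --------
        Illustrative usage (the redshift value is arbitrary); the result is a
        plain dimensionless number:

        >>> from astropy.cosmology import Planck18
        >>> X = Planck18.absorption_distance(3.0)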
"""
return quad(self._abs_distance_integrand_scalar, 0, z)[0]
def distmod(self, z):
"""Distance modulus at redshift ``z``.
The distance modulus is defined as the (apparent magnitude - absolute
magnitude) for an object at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
        distmod : `~astropy.units.Quantity`
Distance modulus at each input redshift, in magnitudes.
See Also
--------
z_at_value : Find the redshift corresponding to a distance modulus.
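        Examples
        --------
        Illustrative usage (the redshift value is arbitrary); the result is a
        `~astropy.units.Quantity` in magnitudes:

        >>> from astropy.cosmology import Planck18
        >>> mu = Planck18.distmod(0.5)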
"""
# Remember that the luminosity distance is in Mpc
# Abs is necessary because in certain obscure closed cosmologies
# the distance modulus can be negative -- which is okay because
# it enters as the square.
val = 5.0 * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
return u.Quantity(val, u.mag)
def comoving_volume(self, z):
r"""Comoving volume in cubic Mpc at redshift ``z``.
This is the volume of the universe encompassed by redshifts less than
``z``. For the case of :math:`\Omega_k = 0` it is a sphere of radius
        `comoving_distance`, but it is less intuitive if :math:`\Omega_k` is nonzero.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
V : `~astropy.units.Quantity`
Comoving volume in :math:`Mpc^3` at each input redshift.
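        Examples
        --------
        Illustrative usage (parameter values are arbitrary); the result is the
        full-sky comoving volume out to the given redshift:

        >>> from astropy.cosmology import FlatLambdaCDM
        >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
        >>> V = cosmo.comoving_volume(0.5)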
"""
Ok0 = self._Ok0
if Ok0 == 0:
return 4.0 / 3.0 * pi * self.comoving_distance(z) ** 3
dh = self._hubble_distance.value # .value for speed
dm = self.comoving_transverse_distance(z).value
term1 = 4.0 * pi * dh**3 / (2.0 * Ok0) * u.Mpc**3
term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
term3 = sqrt(abs(Ok0)) * dm / dh
if Ok0 > 0:
return term1 * (term2 - 1.0 / sqrt(abs(Ok0)) * np.arcsinh(term3))
else:
return term1 * (term2 - 1.0 / sqrt(abs(Ok0)) * np.arcsin(term3))
def differential_comoving_volume(self, z):
"""Differential comoving volume at redshift z.
Useful for calculating the effective comoving volume.
For example, allows for integration over a comoving volume that has a
sensitivity function that changes with redshift. The total comoving
volume is given by integrating ``differential_comoving_volume`` to
redshift ``z`` and multiplying by a solid angle.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
dV : `~astropy.units.Quantity`
Differential comoving volume per redshift per steradian at each
input redshift.
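        Examples
        --------
        A rough numerical sketch (the cosmology, redshift grid, and integration
        scheme are illustrative choices) of recovering the full-sky comoving
        volume inside ``z`` from this method:

        >>> import numpy as np
        >>> import astropy.units as u
        >>> from astropy.cosmology import FlatLambdaCDM
        >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
        >>> zs = np.linspace(0, 0.5, 501)
        >>> dV = cosmo.differential_comoving_volume(zs)
        >>> V = 4 * np.pi * np.trapz(dV.value, zs) * dV.unit * u.sr
        >>> # V is approximately cosmo.comoving_volume(0.5)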
"""
dm = self.comoving_transverse_distance(z)
return self._hubble_distance * (dm**2.0) / (self.efunc(z) << u.steradian)
def kpc_comoving_per_arcmin(self, z):
"""
Separation in transverse comoving kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in comoving kpc corresponding to an arcmin at each
input redshift.
"""
return self.comoving_transverse_distance(z).to(u.kpc) / _radian_in_arcmin
def kpc_proper_per_arcmin(self, z):
"""
Separation in transverse proper kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in proper kpc corresponding to an arcmin at each input
redshift.
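        Examples
        --------
        Illustrative usage (the redshift value is arbitrary); useful for
        converting observed angular sizes to proper sizes:

        >>> import astropy.units as u
        >>> from astropy.cosmology import Planck18
        >>> scale = Planck18.kpc_proper_per_arcmin(0.5)
        >>> size = (scale * 2 * u.arcmin).to(u.kpc)  # size of a 2 arcmin object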
"""
return self.angular_diameter_distance(z).to(u.kpc) / _radian_in_arcmin
def arcsec_per_kpc_comoving(self, z):
"""
Angular separation in arcsec corresponding to a comoving kpc at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a comoving kpc at
each input redshift.
"""
return _radian_in_arcsec / self.comoving_transverse_distance(z).to(u.kpc)
def arcsec_per_kpc_proper(self, z):
"""
Angular separation in arcsec corresponding to a proper kpc at redshift
``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a proper kpc at
each input redshift.
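        Examples
        --------
        Illustrative usage (the redshift value is arbitrary):

        >>> from astropy.cosmology import Planck18
        >>> theta = Planck18.arcsec_per_kpc_proper(0.5)
        >>> # theta is the angle subtended by 1 proper kpc at z=0.5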
"""
return _radian_in_arcsec / self.angular_diameter_distance(z).to(u.kpc)
class FlatFLRWMixin(FlatCosmologyMixin):
"""
Mixin class for flat FLRW cosmologies. Do NOT instantiate directly.
    Must precede the base class in the multiple-inheritance ordering so that
    this mixin's ``__init__`` precedes the base class's.
    Note that all instances of ``FlatFLRWMixin`` are flat, but not all
    flat cosmologies are instances of ``FlatFLRWMixin``. For example,
    ``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
Ode0 = FLRW.Ode0.clone(derived=True) # same as FLRW, but now a derived param.
def __init_subclass__(cls):
super().__init_subclass__()
if "Ode0" in cls._init_signature.parameters:
raise TypeError(
"subclasses of `FlatFLRWMixin` cannot have `Ode0` in `__init__`"
)
def __init__(self, *args, **kw):
super().__init__(*args, **kw) # guaranteed not to have `Ode0`
# Do some twiddling after the fact to get flatness
self._Ok0 = 0.0
self._Ode0 = 1.0 - (self._Om0 + self._Ogamma0 + self._Onu0 + self._Ok0)
@lazyproperty
def nonflat(self: _FlatFLRWMixinT) -> _FLRWT:
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self.__nonflatclass__._init_signature.bind_partial(
**self._init_arguments, Ode0=self.Ode0
)
# Make new instance, respecting args vs kwargs
inst = self.__nonflatclass__(*ba.args, **ba.kwargs)
# Because of machine precision, make sure parameters exactly match
for n in inst.__all_parameters__ + ("Ok0",):
setattr(inst, "_" + n, getattr(self, n))
return inst
    def clone(
        self,
        *,
        meta: Mapping | None = None,
        to_nonflat: bool | None = None,
        **kwargs: Any,
    ):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, except for
changing to the non-flat version of this cosmology.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
to_nonflat : bool or None, optional keyword-only
Whether to change to the non-flat version of this cosmology.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
            returned instead of a copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
The keyword 'to_nonflat' can be used to clone on the non-flat equivalent
cosmology.
>>> Planck13.clone(to_nonflat=True)
LambdaCDM(name="Planck13", ...
>>> Planck13.clone(H0=70, to_nonflat=True)
LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ...
With 'to_nonflat' `True`, ``Ode0`` can be modified.
>>> Planck13.clone(to_nonflat=True, Ode0=1)
LambdaCDM(name="Planck13 (modified)", H0=67.77 km / (Mpc s),
Om0=0.30712, Ode0=1.0, ...
"""
return super().clone(meta=meta, to_nonflat=to_nonflat, **kwargs)
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return 1.0
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
            The total density parameter, which is identically 1 for a flat
            cosmology. Returns `float` if the input is scalar.
"""
return (
1.0 if isinstance(z, (Number, np.generic)) else np.ones_like(z, subok=False)
)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy import sqrt
import astropy.units as u
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.utils import aszarr
from . import scalar_inv_efuncs
from .base import FLRW, FlatFLRWMixin
__all__ = ["wCDM", "FlatwCDM"]
__doctest_requires__ = {"*": ["scipy"]}
class wCDM(FLRW):
"""
FLRW cosmology with a constant dark energy equation of state and curvature.
This has one additional attribute beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at all redshifts. This is
pressure/density for dark energy in units where c=1. A cosmological
constant has w0=-1.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import wCDM
>>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
w0 = Parameter(doc="Dark energy equation of state.", fvalidate="float")
def __init__(
self,
H0,
Om0,
Ode0,
w0=-1.0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=Ode0,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
self.w0 = w0
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, self._w0)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
self._w0,
)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is :math:`w(z) = w_0`.
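        Examples
        --------
        Illustrative check (parameter values are arbitrary) that the equation
        of state is the same at every redshift:

        >>> from astropy.cosmology import wCDM
        >>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)
        >>> w_low, w_high = cosmo.w(0.0), cosmo.w(5.0)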
"""
z = aszarr(z)
return self._w0 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
:math:`I = \left(1 + z\right)^{3\left(1 + w_0\right)}`
"""
return (aszarr(z) + 1.0) ** (3.0 * (1.0 + self._w0))
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return sqrt(
zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0)
+ self._Ode0 * zp1 ** (3.0 * (1.0 + self._w0))
)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (
zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0)
+ self._Ode0 * zp1 ** (3.0 * (1.0 + self._w0))
) ** (-0.5)
class FlatwCDM(FlatFLRWMixin, wCDM):
"""
FLRW cosmology with a constant dark energy equation of state and no spatial
curvature.
This has one additional attribute beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float, optional
Dark energy equation of state at all redshifts. This is
pressure/density for dark energy in units where c=1. A cosmological
constant has w0=-1.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import FlatwCDM
>>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
To get an equivalent cosmology, but of type `astropy.cosmology.wCDM`,
use :attr:`astropy.cosmology.FlatFLRWMixin.nonflat`.
>>> cosmo.nonflat
wCDM(H0=70.0 km / (Mpc s), Om0=0.3, ...
"""
def __init__(
self,
H0,
Om0,
w0=-1.0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=0.0,
w0=w0,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._w0)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ogamma0 + self._Onu0,
self._w0,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
self._w0,
)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return sqrt(
zp1**3 * (Or * zp1 + self._Om0)
+ self._Ode0 * zp1 ** (3.0 * (1 + self._w0))
)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
            Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (
zp1**3 * (Or * zp1 + self._Om0)
+ self._Ode0 * zp1 ** (3.0 * (1.0 + self._w0))
) ** (-0.5)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import acos, cos, inf, sin, sqrt
from numbers import Number
import numpy as np
from numpy import log
import astropy.units as u
from astropy.cosmology.utils import aszarr
from astropy.utils.compat.optional_deps import HAS_SCIPY
from . import scalar_inv_efuncs
from .base import FLRW, FlatFLRWMixin
# isort: split
if HAS_SCIPY:
from scipy.special import ellipkinc, hyp2f1
else:
def ellipkinc(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.special'")
def hyp2f1(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.special'")
__all__ = ["LambdaCDM", "FlatLambdaCDM"]
__doctest_requires__ = {"*": ["scipy"]}
class LambdaCDM(FLRW):
"""FLRW cosmology with a cosmological constant and curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of the cosmological constant in units of
the critical density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import LambdaCDM
>>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(
self,
H0,
Om0,
Ode0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=Ode0,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0)
if self._Ok0 == 0:
self._optimize_flat_norad()
else:
self._comoving_distance_z1z2 = self._elliptic_comoving_distance_z1z2
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0 + self._Onu0,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
)
def _optimize_flat_norad(self):
"""Set optimizations for flat LCDM cosmologies with no radiation."""
# Call out the Om0=0 (de Sitter) and Om0=1 (Einstein-de Sitter)
# The dS case is required because the hypergeometric case
# for Omega_M=0 would lead to an infinity in its argument.
# The EdS case is three times faster than the hypergeometric.
if self._Om0 == 0:
self._comoving_distance_z1z2 = self._dS_comoving_distance_z1z2
self._age = self._dS_age
self._lookback_time = self._dS_lookback_time
elif self._Om0 == 1:
self._comoving_distance_z1z2 = self._EdS_comoving_distance_z1z2
self._age = self._EdS_age
self._lookback_time = self._EdS_lookback_time
else:
self._comoving_distance_z1z2 = self._hypergeometric_comoving_distance_z1z2
self._age = self._flat_age
self._lookback_time = self._flat_lookback_time
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is :math:`w(z) = -1`.
"""
z = aszarr(z)
return -1.0 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by :math:`I = 1`.
"""
z = aszarr(z)
return np.ones(z.shape) if hasattr(z, "shape") else 1.0
def _elliptic_comoving_distance_z1z2(self, z1, z2):
r"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is the same as
the comoving distance if :math:`\Omega_k` is zero.
For :math:`\Omega_{rad} = 0` the comoving distance can be directly
calculated as an elliptic integral [1]_.
Not valid or appropriate for flat cosmologies (Ok0=0).
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
References
----------
.. [1] Kantowski, R., Kao, J., & Thomas, R. (2000). Distance-Redshift
in Inhomogeneous FLRW. arXiv e-prints, astro-ph/0002334.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
# The analytic solution is not valid for any of Om0, Ode0, Ok0 == 0.
# Use the explicit integral solution for these cases.
if self._Om0 == 0 or self._Ode0 == 0 or self._Ok0 == 0:
return self._integral_comoving_distance_z1z2(z1, z2)
b = -(27.0 / 2) * self._Om0**2 * self._Ode0 / self._Ok0**3
kappa = b / abs(b)
if (b < 0) or (2 < b):
def phi_z(Om0, Ok0, kappa, y1, A, z):
return np.arccos(
((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 - A)
/ ((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 + A)
)
v_k = pow(kappa * (b - 1) + sqrt(b * (b - 2)), 1.0 / 3)
y1 = (-1 + kappa * (v_k + 1 / v_k)) / 3
A = sqrt(y1 * (3 * y1 + 2))
g = 1 / sqrt(A)
k2 = (2 * A + kappa * (1 + 3 * y1)) / (4 * A)
phi_z1 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z1)
phi_z2 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z2)
# Get lower-right 0<b<2 solution in Om0, Ode0 plane.
        # For the upper-left 0<b<2 solution the Big Bang didn't happen.
elif (0 < b) and (b < 2) and self._Om0 > self._Ode0:
def phi_z(Om0, Ok0, y1, y2, z):
return np.arcsin(np.sqrt((y1 - y2) / ((z + 1.0) * Om0 / abs(Ok0) + y1)))
yb = cos(acos(1 - b) / 3)
yc = sqrt(3) * sin(acos(1 - b) / 3)
y1 = (1.0 / 3) * (-1 + yb + yc)
y2 = (1.0 / 3) * (-1 - 2 * yb)
y3 = (1.0 / 3) * (-1 + yb - yc)
g = 2 / sqrt(y1 - y2)
k2 = (y1 - y3) / (y1 - y2)
phi_z1 = phi_z(self._Om0, self._Ok0, y1, y2, z1)
phi_z2 = phi_z(self._Om0, self._Ok0, y1, y2, z2)
else:
return self._integral_comoving_distance_z1z2(z1, z2)
prefactor = self._hubble_distance / sqrt(abs(self._Ok0))
return prefactor * g * (ellipkinc(phi_z1, k2) - ellipkinc(phi_z2, k2))
def _dS_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2`` in a flat, :math:`\Omega_{\Lambda}=1` cosmology
(de Sitter).
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
The de Sitter case has an analytic solution.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
return self._hubble_distance * (z2 - z1)
def _EdS_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2`` in a flat, :math:`\Omega_M=1` cosmology
(Einstein - de Sitter).
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
For :math:`\Omega_M=1`, :math:`\Omega_{rad}=0` the comoving distance
has an analytic solution.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
prefactor = 2 * self._hubble_distance
return prefactor * ((z1 + 1.0) ** (-1.0 / 2) - (z2 + 1.0) ** (-1.0 / 2))
def _hypergeometric_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
For :math:`\Omega_{rad} = 0` the comoving distance can be directly
calculated as a hypergeometric function [1]_.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
References
----------
.. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical
expressions and numerical evaluation of the luminosity distance
in a flat cosmology. MNRAS, 468(1), 927-930.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
s = ((1 - self._Om0) / self._Om0) ** (1.0 / 3)
# Use np.sqrt here to handle negative s (Om0>1).
prefactor = self._hubble_distance / np.sqrt(s * self._Om0)
return prefactor * (
self._T_hypergeometric(s / (z1 + 1.0))
- self._T_hypergeometric(s / (z2 + 1.0))
)
def _T_hypergeometric(self, x):
r"""Compute value using Gauss Hypergeometric function 2F1.
        .. math::

           T(x) = 2 \sqrt{x} \,_{2}F_{1}\left(\frac{1}{6}, \frac{1}{2};
                                              \frac{7}{6}; -x^3 \right)
Notes
-----
The :func:`scipy.special.hyp2f1` code already implements the
hypergeometric transformation suggested by Baes et al. [1]_ for use in
        actual numerical evaluations.
References
----------
.. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical
expressions and numerical evaluation of the luminosity distance
in a flat cosmology. MNRAS, 468(1), 927-930.
"""
return 2 * np.sqrt(x) * hyp2f1(1.0 / 6, 1.0 / 2, 7.0 / 6, -(x**3))
def _dS_age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
The age of a de Sitter Universe is infinite.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
t = inf if isinstance(z, Number) else np.full_like(z, inf, dtype=float)
return self._hubble_time * t
def _EdS_age(self, z):
r"""Age of the universe in Gyr at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated as an elliptic integral [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
References
----------
.. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for
standard cosmology. PRD, 62(10), 103507.
"""
return (2.0 / 3) * self._hubble_time * (aszarr(z) + 1.0) ** (-1.5)
def _flat_age(self, z):
r"""Age of the universe in Gyr at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated as an elliptic integral [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
References
----------
.. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for
standard cosmology. PRD, 62(10), 103507.
"""
        # Use np.emath.sqrt and np.arcsinh instead of math.sqrt and math.asinh
        # to properly handle the complex values that arise when 1 - Om0 < 0.
prefactor = (2.0 / 3) * self._hubble_time / np.emath.sqrt(1 - self._Om0)
arg = np.arcsinh(
np.emath.sqrt((1 / self._Om0 - 1 + 0j) / (aszarr(z) + 1.0) ** 3)
)
return (prefactor * arg).real
def _EdS_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated as an elliptic integral.
The lookback time is here calculated based on the ``age(0) - age(z)``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._EdS_age(0) - self._EdS_age(z)
def _dS_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated.
        .. math::

           a = \exp(H t), \quad \text{where } t = 0 \text{ at } z = 0

           t = \frac{1}{H} (\ln 1 - \ln a)
             = \frac{1}{H} \left(0 - \ln \frac{1}{1+z}\right)
             = \frac{1}{H} \ln(1 + z)
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * log(aszarr(z) + 1.0)
def _flat_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated.
The lookback time is here calculated based on the ``age(0) - age(z)``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._flat_age(0) - self._flat_age(z)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
# We override this because it takes a particularly simple
# form for a cosmological constant
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(
zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0
)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0) ** (
-0.5
)
class FlatLambdaCDM(FlatFLRWMixin, LambdaCDM):
"""FLRW cosmology with a cosmological constant and no curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
To get an equivalent cosmology, but of type `astropy.cosmology.LambdaCDM`,
use :attr:`astropy.cosmology.FlatFLRWMixin.nonflat`.
>>> cosmo.nonflat
LambdaCDM(H0=70.0 km / (Mpc s), Om0=0.3, ...
"""
def __init__(
self,
H0,
Om0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=0.0,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0)
# Repeat the optimization reassignments here because the init
            # of the LambdaCDM above didn't actually create a flat cosmology.
# That was done through the explicit tweak setting self._Ok0.
self._optimize_flat_norad()
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ogamma0 + self._Onu0,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
# We override this because it takes a particularly simple
# form for a cosmological constant
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1**3 * (Or * zp1 + self._Om0) + self._Ode0)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1**3 * (Or * zp1 + self._Om0) + self._Ode0) ** (-0.5)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy import exp
import astropy.units as u
from astropy.cosmology import units as cu
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.utils import aszarr
from . import scalar_inv_efuncs
from .base import FLRW
__all__ = ["wpwaCDM"]
__doctest_requires__ = {"*": ["scipy"]}
class wpwaCDM(FLRW):
r"""
FLRW cosmology with a CPL dark energy equation of state, a pivot redshift,
and curvature.
The equation for the dark energy equation of state uses the CPL form as
described in Chevallier & Polarski [1]_ and Linder [2]_, but modified to
have a pivot redshift as in the findings of the Dark Energy Task Force
    [3]_: :math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a (1/(1+z_p) - 1/(1+z))`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
wp : float, optional
Dark energy equation of state at the pivot redshift zp. This is
pressure/density for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has wp=-1.0 and wa=0.0.
zp : float or quantity-like ['redshift'], optional
Pivot redshift -- the redshift where w(z) = wp
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import wpwaCDM
>>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
.. [3] Albrecht, A., Amendola, L., Bernstein, G., Clowe, D., Eisenstein,
D., Guzzo, L., Hirata, C., Huterer, D., Kirshner, R., Kolb, E., &
Nichol, R. (2009). Findings of the Joint Dark Energy Mission Figure
of Merit Science Working Group. arXiv e-prints, arXiv:0901.0721.
"""
    wp = Parameter(
        doc="Dark energy equation of state at the pivot redshift zp.",
        fvalidate="float",
    )
wa = Parameter(
doc="Negative derivative of dark energy equation of state w.r.t. a.",
fvalidate="float",
)
zp = Parameter(doc="The pivot redshift, where w(z) = wp.", unit=cu.redshift)
def __init__(
self,
H0,
Om0,
Ode0,
wp=-1.0,
wa=0.0,
zp=0.0 * cu.redshift,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=Ode0,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
self.wp = wp
self.wa = wa
self.zp = zp
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
apiv = 1.0 / (1.0 + self._zp.value)
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._wp,
apiv,
self._wa,
)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0 + self._Onu0,
self._wp,
apiv,
self._wa,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
self._wp,
apiv,
self._wa,
)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is :math:`w(z) = w_p + w_a (a_p - a)` where
        :math:`a = 1/(1+z)` and :math:`a_p = 1/(1+z_p)`.
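        Examples
        --------
        Illustrative usage (parameter values are arbitrary); at the pivot
        redshift ``zp`` the returned value equals ``wp``:

        >>> from astropy.cosmology import wpwaCDM
        >>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4)
        >>> w_pivot = cosmo.w(0.4)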
"""
apiv = 1.0 / (1.0 + self._zp.value)
return self._wp + self._wa * (apiv - 1.0 / (aszarr(z) + 1.0))
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
        .. math::

           a_p = \frac{1}{1 + z_p}

           I = \left(1 + z\right)^{3 \left(1 + w_p + a_p w_a\right)}
               \exp \left(-3 w_a \frac{z}{1+z}\right)
"""
z = aszarr(z)
zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless])
apiv = 1.0 / (1.0 + self._zp.value)
return zp1 ** (3.0 * (1.0 + self._wp + apiv * self._wa)) * exp(
-3.0 * self._wa * z / zp1
)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy import exp
import astropy.units as u
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.utils import aszarr
from . import scalar_inv_efuncs
from .base import FLRW, FlatFLRWMixin
__all__ = ["w0waCDM", "Flatw0waCDM"]
__doctest_requires__ = {"*": ["scipy"]}
class w0waCDM(FLRW):
r"""FLRW cosmology with a CPL dark energy equation of state and curvature.
The equation for the dark energy equation of state uses the
CPL form as described in Chevallier & Polarski [1]_ and Linder [2]_:
:math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import w0waCDM
>>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
w0 = Parameter(doc="Dark energy equation of state at z=0.", fvalidate="float")
wa = Parameter(
doc="Negative derivative of dark energy equation of state w.r.t. a.",
fvalidate="float",
)
def __init__(
self,
H0,
Om0,
Ode0,
w0=-1.0,
wa=0.0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=Ode0,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
self.w0 = w0
self.wa = wa
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._w0,
self._wa,
)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0,
self._wa,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
self._w0,
self._wa,
)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is
:math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \frac{z}{1+z}`.
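        Examples
        --------
        Illustrative usage (parameter values are arbitrary); ``w`` runs from
        ``w0`` today toward ``w0 + wa`` at high redshift:

        >>> from astropy.cosmology import w0waCDM
        >>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
        >>> w_today, w_early = cosmo.w(0.0), cosmo.w(1100.0)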
"""
z = aszarr(z)
return self._w0 + self._wa * z / (z + 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
I = \left(1 + z\right)^{3 \left(1 + w_0 + w_a\right)}
\exp \left(-3 w_a \frac{z}{1+z}\right)
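        Examples
        --------
        Illustrative usage (parameter values are arbitrary); the returned
        factor is the dark energy density at ``z`` relative to its value today:

        >>> from astropy.cosmology import w0waCDM
        >>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
        >>> I = cosmo.de_density_scale(0.5)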
"""
z = aszarr(z)
zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless])
return zp1 ** (3 * (1 + self._w0 + self._wa)) * exp(-3 * self._wa * z / zp1)
class Flatw0waCDM(FlatFLRWMixin, w0waCDM):
"""FLRW cosmology with a CPL dark energy equation of state and no
curvature.
The equation for the dark energy equation of state uses the CPL form as
described in Chevallier & Polarski [1]_ and Linder [2]_:
:math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float, optional
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import Flatw0waCDM
>>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
To get an equivalent cosmology, but of type `astropy.cosmology.w0waCDM`,
use :attr:`astropy.cosmology.FlatFLRWMixin.nonflat`.
>>> cosmo.nonflat
w0waCDM(H0=70.0 km / (Mpc s), Om0=0.3, ...
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
def __init__(
self,
H0,
Om0,
w0=-1.0,
wa=0.0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=0.0,
w0=w0,
wa=wa,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._w0, self._wa)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ogamma0 + self._Onu0,
self._w0,
self._wa,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
self._w0,
self._wa,
)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for :mod:`astropy.cosmology.comparison`"""
import re
import numpy as np
import pytest
from astropy.cosmology import Cosmology, FlatCosmologyMixin, Planck18, cosmology_equal
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.funcs.comparison import (
_cosmology_not_equal,
_CosmologyWrapper,
_parse_format,
_parse_formats,
)
from astropy.cosmology.io.tests.base import ToFromTestMixinBase
class ComparisonFunctionTestBase(ToFromTestMixinBase):
"""Tests for cosmology comparison functions.
This class inherits from
`astropy.cosmology.io.tests.base.ToFromTestMixinBase` because the cosmology
    comparison functions all have a kwarg ``format`` that allows the arguments to
be converted to a |Cosmology| using the ``to_format`` architecture.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must be
inherited in a subclass.
"""
@pytest.fixture(scope="class")
def cosmo(self):
return Planck18
@pytest.fixture(scope="class")
def cosmo_eqvxflat(self, cosmo):
if isinstance(cosmo, FlatCosmologyMixin):
return cosmo.nonflat
pytest.skip(
"cosmology is not flat, so does not have an equivalent non-flat cosmology."
)
@pytest.fixture(
scope="class",
params={k for k, _ in convert_registry._readers.keys()} - {"astropy.cosmology"},
)
def format(self, request):
return request.param
@pytest.fixture(scope="class")
def xfail_cant_autoidentify(self, format):
"""`pytest.fixture` form of method ``can_autoidentify`."""
if not self.can_autodentify(format):
pytest.xfail("cannot autoidentify")
@pytest.fixture(scope="class")
def converted(self, to_format, format):
if format == "astropy.model": # special case Model
return to_format(format, method="comoving_distance")
return to_format(format)
@pytest.fixture(scope="class")
def pert_cosmo(self, cosmo):
# change one parameter
p = cosmo.__parameters__[0]
v = getattr(cosmo, p)
cosmo2 = cosmo.clone(
**{p: v * 1.0001 if v != 0 else 0.001 * getattr(v, "unit", 1)}
)
return cosmo2
@pytest.fixture(scope="class")
def pert_cosmo_eqvxflat(self, pert_cosmo):
if isinstance(pert_cosmo, FlatCosmologyMixin):
return pert_cosmo.nonflat
pytest.skip(
"cosmology is not flat, so does not have an equivalent non-flat cosmology."
)
@pytest.fixture(scope="class")
def pert_converted(self, pert_cosmo, format):
if format == "astropy.model": # special case Model
return pert_cosmo.to_format(format, method="comoving_distance")
return pert_cosmo.to_format(format)
class Test_parse_format(ComparisonFunctionTestBase):
"""Test functions ``_parse_format``."""
@pytest.fixture(scope="class")
def converted(self, to_format, format):
if format == "astropy.model": # special case Model
return to_format(format, method="comoving_distance")
converted = to_format(format)
# Some raise a segfault! TODO: figure out why
if isinstance(converted, _CosmologyWrapper._cantbroadcast):
converted = _CosmologyWrapper(converted)
return converted
# ========================================================================
def test_shortcut(self, cosmo):
"""Test the already-a-cosmology shortcut."""
# A Cosmology
for fmt in {None, True, False, "astropy.cosmology"}:
assert _parse_format(cosmo, fmt) is cosmo, f"{fmt} failed"
# A Cosmology, but improperly formatted
# see ``test_parse_format_error_wrong_format``.
def test_convert(self, converted, format, cosmo):
"""Test converting a cosmology-like object"""
out = _parse_format(converted, format)
assert isinstance(out, Cosmology)
assert out == cosmo
def test_parse_format_error_wrong_format(self, cosmo):
"""
Test ``_parse_format`` errors when given a Cosmology object and format
is not compatible.
"""
with pytest.raises(
ValueError, match=re.escape("for parsing a Cosmology, 'format'")
):
_parse_format(cosmo, "mapping")
def test_parse_format_error_noncosmology_cant_convert(self):
"""
Test ``_parse_format`` errors when given a non-Cosmology object
and format is `False`.
"""
notacosmo = object()
with pytest.raises(TypeError, match=re.escape("if 'format' is False")):
_parse_format(notacosmo, False)
def test_parse_format_vectorized(self, cosmo, format, converted):
# vectorized on cosmos
out = _parse_format([cosmo, cosmo], None)
assert len(out) == 2
assert np.all(out == cosmo)
# vectorized on formats
out = _parse_format(cosmo, [None, None])
assert len(out) == 2
assert np.all(out == cosmo)
# more complex broadcast
out = _parse_format(
[[cosmo, converted], [converted, cosmo]], [[None, format], [format, None]]
)
assert out.shape == (2, 2)
assert np.all(out == cosmo)
def test_parse_formats_vectorized(self, cosmo):
# vectorized on cosmos
out = _parse_formats(cosmo, cosmo, format=None)
assert len(out) == 2
assert np.all(out == cosmo)
# does NOT vectorize on formats
with pytest.raises(ValueError, match="operands could not be broadcast"):
_parse_formats(cosmo, format=[None, None])
class Test_cosmology_equal(ComparisonFunctionTestBase):
"""Test :func:`astropy.cosmology.comparison.cosmology_equal`"""
def test_cosmology_equal_simple(self, cosmo, pert_cosmo):
# equality
assert cosmology_equal(cosmo, cosmo) is True
# not equal to perturbed cosmology
assert cosmology_equal(cosmo, pert_cosmo) is False
def test_cosmology_equal_equivalent(
self, cosmo, cosmo_eqvxflat, pert_cosmo, pert_cosmo_eqvxflat
):
# now need to check equivalent, but not equal, cosmologies.
assert cosmology_equal(cosmo, cosmo_eqvxflat, allow_equivalent=True) is True
assert cosmology_equal(cosmo, cosmo_eqvxflat, allow_equivalent=False) is False
assert (
cosmology_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=True)
is True
)
assert (
cosmology_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=False)
is False
)
def test_cosmology_equal_too_many_cosmo(self, cosmo):
with pytest.raises(
TypeError, match="cosmology_equal takes 2 positional arguments"
):
cosmology_equal(cosmo, cosmo, cosmo)
def test_cosmology_equal_format_error(self, cosmo, converted):
# Not converting `converted`
with pytest.raises(TypeError):
cosmology_equal(cosmo, converted)
with pytest.raises(TypeError):
cosmology_equal(cosmo, converted, format=False)
def test_cosmology_equal_format_auto(
self, cosmo, converted, xfail_cant_autoidentify
):
# These tests only run if the format can autoidentify.
assert cosmology_equal(cosmo, converted, format=None) is True
assert cosmology_equal(cosmo, converted, format=True) is True
def test_cosmology_equal_format_specify(
self, cosmo, format, converted, pert_converted
):
# equality
assert cosmology_equal(cosmo, converted, format=[None, format]) is True
assert cosmology_equal(converted, cosmo, format=[format, None]) is True
# non-equality
assert cosmology_equal(cosmo, pert_converted, format=[None, format]) is False
def test_cosmology_equal_equivalent_format_specify(
self, cosmo, format, converted, cosmo_eqvxflat
):
# specifying the format
assert (
cosmology_equal(
cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=True
)
is True
)
assert (
cosmology_equal(
converted, cosmo_eqvxflat, format=[format, None], allow_equivalent=True
)
is True
)
class Test_cosmology_not_equal(ComparisonFunctionTestBase):
"""Test :func:`astropy.cosmology.comparison._cosmology_not_equal`"""
def test_cosmology_not_equal_simple(self, cosmo, pert_cosmo):
# equality
assert _cosmology_not_equal(cosmo, cosmo) is False
# not equal to perturbed cosmology
assert _cosmology_not_equal(cosmo, pert_cosmo) is True
def test_cosmology_not_equal_too_many_cosmo(self, cosmo):
with pytest.raises(TypeError, match="_cosmology_not_equal takes 2 positional"):
_cosmology_not_equal(cosmo, cosmo, cosmo)
def test_cosmology_not_equal_equivalent(
self, cosmo, cosmo_eqvxflat, pert_cosmo, pert_cosmo_eqvxflat
):
# now need to check equivalent, but not equal, cosmologies.
assert (
_cosmology_not_equal(cosmo, cosmo_eqvxflat, allow_equivalent=False) is True
)
assert (
_cosmology_not_equal(cosmo, cosmo_eqvxflat, allow_equivalent=True) is False
)
assert (
_cosmology_not_equal(
pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=False
)
is True
)
assert (
_cosmology_not_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=True)
is False
)
def test_cosmology_not_equal_format_error(self, cosmo, converted):
# Not converting `converted`
with pytest.raises(TypeError):
_cosmology_not_equal(cosmo, converted)
with pytest.raises(TypeError):
_cosmology_not_equal(cosmo, converted, format=False)
def test_cosmology_not_equal_format_auto(
self, cosmo, pert_converted, xfail_cant_autoidentify
):
assert _cosmology_not_equal(cosmo, pert_converted, format=None) is True
assert _cosmology_not_equal(cosmo, pert_converted, format=True) is True
def test_cosmology_not_equal_format_specify(
self, cosmo, format, converted, pert_converted
):
# specifying the format
assert (
_cosmology_not_equal(cosmo, pert_converted, format=[None, format]) is True
)
assert (
_cosmology_not_equal(pert_converted, cosmo, format=[format, None]) is True
)
# equality
assert _cosmology_not_equal(cosmo, converted, format=[None, format]) is False
def test_cosmology_not_equal_equivalent_format_specify(
self, cosmo, format, converted, cosmo_eqvxflat
):
# specifying the format
assert (
_cosmology_not_equal(
cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=False
)
is True
)
assert (
_cosmology_not_equal(
cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=True
)
is False
)
assert (
_cosmology_not_equal(
converted, cosmo_eqvxflat, format=[format, None], allow_equivalent=True
)
is False
)
|
80226a2ccd83f11c85340cb0ee87d913ffbdfb0080b7412f9468cd290dba44b0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import sys
from io import StringIO
import numpy as np
import pytest
from astropy import units as u
from astropy.cosmology import core, flrw
from astropy.cosmology.funcs import _z_at_scalar_value, z_at_value
from astropy.cosmology.realizations import (
WMAP1,
WMAP3,
WMAP5,
WMAP7,
WMAP9,
Planck13,
Planck15,
Planck18,
)
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_scalar():
# These are tests of expected values, and hence have less precision
# than the roundtrip tests below (test_z_at_value_roundtrip);
# here we have to worry about the cosmological calculations
# giving slightly different values on different architectures,
# there we are checking internal consistency on the same architecture
# and so can be more demanding
cosmo = Planck13
assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.19812268, rtol=1e-6)
assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr), 0.795198375, rtol=1e-6)
assert allclose(z_at_value(cosmo.distmod, 46 * u.mag), 1.991389168, rtol=1e-6)
assert allclose(
z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc), 1.36857907, rtol=1e-6
)
assert allclose(
z_at_value(cosmo.luminosity_distance, 26.037193804 * u.Gpc, ztol=1e-10),
3,
rtol=1e-9,
)
assert allclose(
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmax=2),
0.681277696,
rtol=1e-6,
)
assert allclose(
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=2.5),
3.7914908,
rtol=1e-6,
)
# test behavior when the solution is outside z limits (should
# raise a CosmologyError)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmax=0.5)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=4.0)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
class Test_ZatValue:
def setup_class(self):
self.cosmo = Planck13
def test_broadcast_arguments(self):
"""Test broadcast of arguments."""
# broadcasting main argument
assert allclose(
z_at_value(self.cosmo.age, [2, 7] * u.Gyr),
[3.1981206134773115, 0.7562044333305182],
rtol=1e-6,
)
# basic broadcast of secondary arguments
assert allclose(
z_at_value(
self.cosmo.angular_diameter_distance,
1500 * u.Mpc,
zmin=[0, 2.5],
zmax=[2, 4],
),
[0.681277696, 3.7914908],
rtol=1e-6,
)
# more interesting broadcast
assert allclose(
z_at_value(
self.cosmo.angular_diameter_distance,
1500 * u.Mpc,
zmin=[[0, 2.5]],
zmax=[2, 4],
),
[[0.681277696, 3.7914908]],
rtol=1e-6,
)
def test_broadcast_bracket(self):
"""`bracket` has special requirements."""
# start with an easy one
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=None),
3.1981206134773115,
rtol=1e-6,
)
# now actually have a bracket
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4]),
3.1981206134773115,
rtol=1e-6,
)
# now a bad length
with pytest.raises(ValueError, match="sequence"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4, 4, 5])
# now the wrong dtype : an ndarray, but not an object array
with pytest.raises(TypeError, match="dtype"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=np.array([0, 4]))
# now an object array of brackets
bracket = np.array([[0, 4], [0, 3, 4]], dtype=object)
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=bracket),
[3.1981206134773115, 3.1981206134773115],
rtol=1e-6,
)
def test_bad_broadcast(self):
"""Shapes mismatch as expected"""
with pytest.raises(ValueError, match="broadcast"):
z_at_value(
self.cosmo.angular_diameter_distance,
1500 * u.Mpc,
zmin=[0, 2.5, 0.1],
zmax=[2, 4],
)
def test_scalar_input_to_output(self):
"""Test scalar input returns a scalar."""
z = z_at_value(
self.cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=0, zmax=2
)
assert isinstance(z, u.Quantity)
assert z.dtype == np.float64
assert z.shape == ()
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_numpyvectorize():
"""Test that numpy vectorize fails on Quantities.
If this test starts failing then numpy vectorize can be used instead of
the home-brewed vectorization. Please submit a PR making the change.
"""
z_at_value = np.vectorize(
_z_at_scalar_value, excluded=["func", "method", "verbose"]
)
with pytest.raises(u.UnitConversionError, match="dimensionless quantities"):
z_at_value(Planck15.age, 10 * u.Gyr)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_verbose(monkeypatch):
cosmo = Planck13
# Test the "verbose" flag. Since this uses "print", need to mod stdout
mock_stdout = StringIO()
monkeypatch.setattr(sys, "stdout", mock_stdout)
resx = z_at_value(cosmo.age, 2 * u.Gyr, verbose=True)
assert str(resx.value) in mock_stdout.getvalue() # test "verbose" prints res
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize("method", ["Brent", "Golden", "Bounded"])
def test_z_at_value_bracketed(method):
"""
Test 2 solutions for angular diameter distance by not constraining zmin, zmax,
but setting `bracket` on the appropriate side of the turning point z.
Setting zmin / zmax should override `bracket`.
"""
cosmo = Planck13
if method == "Bounded":
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z = z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method)
if z > 1.6:
z = 3.7914908
bracket = (0.9, 1.5)
else:
z = 0.6812777
bracket = (1.6, 2.0)
with pytest.warns(UserWarning, match=r"Option 'bracket' is ignored"):
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=bracket,
),
z,
rtol=1e-6,
)
else:
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.3, 1.0),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(2.0, 4.0),
),
3.7914908,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.1, 1.5),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.1, 1.0, 2.0),
),
0.6812777,
rtol=1e-6,
)
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.9, 1.5),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(1.6, 2.0),
),
3.7914908,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(1.6, 2.0),
zmax=1.6,
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.9, 1.5),
zmin=1.5,
),
3.7914908,
rtol=1e-6,
)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(3.9, 5.0),
zmin=4.0,
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize("method", ["Brent", "Golden", "Bounded"])
def test_z_at_value_unconverged(method):
"""
    Test warnings on a non-converged solution when ``maxfun`` is set to too small
    an iteration number; only 'Bounded' returns a status value and a specific message.
"""
cosmo = Planck18
ztol = {"Brent": [1e-4, 1e-4], "Golden": [1e-3, 1e-2], "Bounded": [1e-3, 1e-1]}
if method == "Bounded":
ctx = pytest.warns(
AstropyUserWarning,
match="Solver returned 1: Maximum number of function calls reached",
)
else:
ctx = pytest.warns(AstropyUserWarning, match="Solver returned None")
with ctx:
z0 = z_at_value(
cosmo.angular_diameter_distance, 1 * u.Gpc, zmax=2, maxfun=13, method=method
)
with ctx:
z1 = z_at_value(
cosmo.angular_diameter_distance, 1 * u.Gpc, zmin=2, maxfun=13, method=method
)
assert allclose(z0, 0.32442, rtol=ztol[method][0])
assert allclose(z1, 8.18551, rtol=ztol[method][1])
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize(
"cosmo",
[
Planck13,
Planck15,
Planck18,
WMAP1,
WMAP3,
WMAP5,
WMAP7,
WMAP9,
flrw.LambdaCDM,
flrw.FlatLambdaCDM,
flrw.wpwaCDM,
flrw.w0wzCDM,
flrw.wCDM,
flrw.FlatwCDM,
flrw.w0waCDM,
flrw.Flatw0waCDM,
],
)
def test_z_at_value_roundtrip(cosmo):
"""
Calculate values from a known redshift, and then check that
z_at_value returns the right answer.
"""
z = 0.5
# Skip Ok, w, de_density_scale because in the Planck cosmologies
    # they are redshift independent and hence uninvertible;
# *_distance_z1z2 methods take multiple arguments, so require
# special handling
# clone is not a redshift-dependent method
# nu_relative_density is not redshift-dependent in the WMAP cosmologies
skip = (
"Ok",
"Otot",
"angular_diameter_distance_z1z2",
"clone",
"is_equivalent",
"de_density_scale",
"w",
)
if str(cosmo.name).startswith("WMAP"):
skip += ("nu_relative_density",)
methods = inspect.getmembers(cosmo, predicate=inspect.ismethod)
for name, func in methods:
if name.startswith("_") or name in skip:
continue
fval = func(z)
        # we need a bracket here to pick the right solution for
# angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
got = z_at_value(func, fval, bracket=[0.3, 1.0], ztol=1e-12)
assert allclose(got, z, rtol=2e-11), f"Round-trip testing {name} failed"
# Test distance functions between two redshifts; only for realizations
if isinstance(cosmo.name, str):
z2 = 2.0
func_z1z2 = [
lambda z1: cosmo._comoving_distance_z1z2(z1, z2),
lambda z1: cosmo._comoving_transverse_distance_z1z2(z1, z2),
lambda z1: cosmo.angular_diameter_distance_z1z2(z1, z2),
]
for func in func_z1z2:
fval = func(z)
assert allclose(z, z_at_value(func, fval, zmax=1.5, ztol=1e-12), rtol=2e-11)
|
a3e2fc909a824afd7988bca7b1c32ac52de1f983a68d6dc27ec081d6d2aa582a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import inspect
import random
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy.cosmology.core import Cosmology
from astropy.cosmology.io.model import _CosmologyModel, from_model, to_model
from astropy.cosmology.tests.helper import get_redshift_methods
from astropy.modeling.models import Gaussian1D
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromModelTestMixin(ToFromTestMixinBase):
"""Tests for a Cosmology[To/From]Format with ``format="astropy.model"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
"""
@pytest.fixture(scope="class")
def method_name(self, cosmo):
# get methods, ignoring private and dunder
methods = get_redshift_methods(cosmo, include_private=False, include_z2=True)
# dynamically detect ABC and optional dependencies
for n in tuple(methods):
params = inspect.signature(getattr(cosmo, n)).parameters.keys()
ERROR_SEIVE = (NotImplementedError, ValueError)
            # ABC can't introspect for good input
if not HAS_SCIPY:
ERROR_SEIVE = ERROR_SEIVE + (ModuleNotFoundError,)
args = np.arange(len(params)) + 1
try:
getattr(cosmo, n)(*args)
except ERROR_SEIVE:
methods.discard(n)
# TODO! pytest doesn't currently allow multiple yields (`cosmo`) so
# testing with 1 random method
# yield from methods
return random.choice(tuple(methods)) if methods else None
# ===============================================================
def test_fromformat_model_wrong_cls(self, from_format):
"""Test when Model is not the correct class."""
model = Gaussian1D(amplitude=10, mean=14)
with pytest.raises(AttributeError):
from_format(model)
def test_toformat_model_not_method(self, to_format):
"""Test when method is not a method."""
with pytest.raises(AttributeError):
to_format("astropy.model", method="this is definitely not a method.")
def test_toformat_model_not_callable(self, to_format):
"""Test when method is actually an attribute."""
with pytest.raises(ValueError):
to_format("astropy.model", method="name")
def test_toformat_model(self, cosmo, to_format, method_name):
"""Test cosmology -> astropy.model."""
if method_name is None: # no test if no method
return
model = to_format("astropy.model", method=method_name)
assert isinstance(model, _CosmologyModel)
# Parameters
expect = tuple(n for n in cosmo.__parameters__ if getattr(cosmo, n) is not None)
assert model.param_names == expect
# scalar result
args = np.arange(model.n_inputs) + 1
got = model.evaluate(*args)
expected = getattr(cosmo, method_name)(*args)
assert np.all(got == expected)
got = model(*args)
expected = getattr(cosmo, method_name)(*args)
np.testing.assert_allclose(got, expected)
# vector result
if "scalar" not in method_name:
args = (np.ones((model.n_inputs, 3)).T + np.arange(model.n_inputs)).T
got = model.evaluate(*args)
expected = getattr(cosmo, method_name)(*args)
assert np.all(got == expected)
got = model(*args)
expected = getattr(cosmo, method_name)(*args)
np.testing.assert_allclose(got, expected)
def test_tofromformat_model_instance(
self, cosmo_cls, cosmo, method_name, to_format, from_format
):
"""Test cosmology -> astropy.model -> cosmology."""
if method_name is None: # no test if no method
return
# ------------
# To Model
# this also serves as a test of all added methods / attributes
# in _CosmologyModel.
model = to_format("astropy.model", method=method_name)
assert isinstance(model, _CosmologyModel)
assert model.cosmology_class is cosmo_cls
assert model.cosmology == cosmo
assert model.method_name == method_name
# ------------
# From Model
# it won't error if everything matches up
got = from_format(model, format="astropy.model")
assert got == cosmo
assert set(cosmo.meta.keys()).issubset(got.meta.keys())
# Note: model adds parameter attributes to the metadata
# also it auto-identifies 'format'
got = from_format(model)
assert got == cosmo
assert set(cosmo.meta.keys()).issubset(got.meta.keys())
def test_fromformat_model_subclass_partial_info(self):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
pass # there's no partial information with a Model
@pytest.mark.parametrize("format", [True, False, None, "astropy.model"])
def test_is_equivalent_to_model(self, cosmo, method_name, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a model.
"""
if method_name is None: # no test if no method
return
obj = to_format("astropy.model", method=method_name)
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (True if format is not False else False)
class TestToFromModel(ToFromDirectTestBase, ToFromModelTestMixin):
"""Directly test ``to/from_model``."""
def setup_class(self):
self.functions = {"to": to_model, "from": from_model}
|
7628fcd4c03a08253787c7a188ff5d00ff3b785df3e1f77246d7cb678e660e9c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import inspect
from collections import OrderedDict
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy.cosmology import Cosmology
from astropy.cosmology.io.mapping import from_mapping, to_mapping
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromMappingTestMixin(ToFromTestMixinBase):
"""Tests for a Cosmology[To/From]Format with ``format="mapping"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_mapping_default(self, cosmo, to_format):
"""Test default usage of Cosmology -> mapping."""
m = to_format("mapping")
keys = tuple(m.keys())
assert isinstance(m, dict)
# Check equality of all expected items
assert keys[0] == "cosmology"
assert m.pop("cosmology") is cosmo.__class__
assert keys[1] == "name"
assert m.pop("name") == cosmo.name
for i, k in enumerate(cosmo.__parameters__, start=2):
assert keys[i] == k
assert np.array_equal(m.pop(k), getattr(cosmo, k))
assert keys[-1] == "meta"
assert m.pop("meta") == cosmo.meta
# No unexpected items
assert not m
def test_to_mapping_wrong_cls(self, to_format):
"""Test incorrect argument ``cls`` in ``to_mapping()``."""
with pytest.raises(TypeError, match="'cls' must be"):
to_format("mapping", cls=list)
@pytest.mark.parametrize("map_cls", [dict, OrderedDict])
def test_to_mapping_cls(self, to_format, map_cls):
"""Test argument ``cls`` in ``to_mapping()``."""
m = to_format("mapping", cls=map_cls)
assert isinstance(m, map_cls) # test type
def test_to_mapping_cosmology_as_str(self, cosmo_cls, to_format):
"""Test argument ``cosmology_as_str`` in ``to_mapping()``."""
default = to_format("mapping")
# Cosmology is the class
m = to_format("mapping", cosmology_as_str=False)
assert inspect.isclass(m["cosmology"])
assert cosmo_cls is m["cosmology"]
assert m == default # False is the default option
# Cosmology is a string
m = to_format("mapping", cosmology_as_str=True)
assert isinstance(m["cosmology"], str)
assert m["cosmology"] == cosmo_cls.__qualname__ # Correct class
assert tuple(m.keys())[0] == "cosmology" # Stayed at same index
def test_tofrom_mapping_cosmology_as_str(self, cosmo, to_format, from_format):
"""Test roundtrip with ``cosmology_as_str=True``.
        The test for the default option (`False`) is in ``test_from_mapping_default``.
"""
m = to_format("mapping", cosmology_as_str=True)
got = from_format(m, format="mapping")
assert got == cosmo
assert got.meta == cosmo.meta
def test_to_mapping_move_from_meta(self, to_format):
"""Test argument ``move_from_meta`` in ``to_mapping()``."""
default = to_format("mapping")
# Metadata is 'separate' from main mapping
m = to_format("mapping", move_from_meta=False)
assert "meta" in m.keys()
assert not any([k in m for k in m["meta"]]) # Not added to main
assert m == default # False is the default option
# Metadata is mixed into main mapping.
m = to_format("mapping", move_from_meta=True)
assert "meta" not in m.keys()
assert all([k in m for k in default["meta"]]) # All added to main
# The parameters take precedence over the metadata
assert all([np.array_equal(v, m[k]) for k, v in default.items() if k != "meta"])
def test_tofrom_mapping_move_tofrom_meta(self, cosmo, to_format, from_format):
"""Test roundtrip of ``move_from/to_meta`` in ``to/from_mapping()``."""
# Metadata is mixed into main mapping.
m = to_format("mapping", move_from_meta=True)
# (Just adding something to ensure there's 'metadata')
m["mismatching"] = "will error"
# (Tests are different if the last argument is a **kwarg)
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = from_format(m, format="mapping")
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# Reading with mismatching parameters errors...
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(m, format="mapping")
# unless mismatched are moved to meta.
got = from_format(m, format="mapping", move_to_meta=True)
assert got == cosmo # (Doesn't check metadata)
assert got.meta["mismatching"] == "will error"
# -----------------------------------------------------
def test_from_not_mapping(self, cosmo, from_format):
"""Test incorrect map type in ``from_mapping()``."""
with pytest.raises((TypeError, ValueError)):
from_format("NOT A MAP", format="mapping")
def test_from_mapping_default(self, cosmo, to_format, from_format):
"""Test (cosmology -> Mapping) -> cosmology."""
m = to_format("mapping")
# Read from exactly as given.
got = from_format(m, format="mapping")
assert got == cosmo
assert got.meta == cosmo.meta
# Reading auto-identifies 'format'
got = from_format(m)
assert got == cosmo
assert got.meta == cosmo.meta
def test_fromformat_subclass_partial_info_mapping(self, cosmo):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
m = cosmo.to_format("mapping")
# partial information
m.pop("cosmology", None)
m.pop("Tcmb0", None)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo.__class__.from_format(m, format="mapping")
got2 = Cosmology.from_format(m, format="mapping", cosmology=cosmo.__class__)
got3 = Cosmology.from_format(
m, format="mapping", cosmology=cosmo.__class__.__qualname__
)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo.__class__._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
@pytest.mark.parametrize("format", [True, False, None, "mapping"])
def test_is_equivalent_to_mapping(self, cosmo, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a mapping.
"""
obj = to_format("mapping")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (True if format is not False else False)
class TestToFromMapping(ToFromDirectTestBase, ToFromMappingTestMixin):
"""Directly test ``to/from_mapping``."""
def setup_class(self):
self.functions = {"to": to_mapping, "from": from_mapping}
@pytest.mark.skip("N/A")
def test_fromformat_subclass_partial_info_mapping(self):
"""This test does not apply to the direct functions."""
|
0aab044c5bc34ac14365281e17b97a2bb47f32fa75dacb95a8fe99ac3beb8e6d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# THIRD PARTY
import pytest
# LOCAL
from astropy.cosmology import Cosmology
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.io.table import from_table, to_table
from astropy.table import QTable, Table, vstack
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromTableTestMixin(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="astropy.table"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_table_bad_index(self, from_format, to_format):
"""Test if argument ``index`` is incorrect"""
tbl = to_format("astropy.table")
# single-row table and has a non-0/None index
with pytest.raises(IndexError, match="index 2 out of range"):
from_format(tbl, index=2, format="astropy.table")
# string index where doesn't match
with pytest.raises(KeyError, match="No matches found for key"):
from_format(tbl, index="row 0", format="astropy.table")
# -----------------------
def test_to_table_failed_cls(self, to_format):
"""Test failed table type."""
with pytest.raises(TypeError, match="'cls' must be"):
to_format("astropy.table", cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
def test_to_table_cls(self, to_format, tbl_cls):
tbl = to_format("astropy.table", cls=tbl_cls)
assert isinstance(tbl, tbl_cls) # test type
# -----------------------
@pytest.mark.parametrize("in_meta", [True, False])
def test_to_table_in_meta(self, cosmo_cls, to_format, in_meta):
"""Test where the cosmology class is placed."""
tbl = to_format("astropy.table", cosmology_in_meta=in_meta)
# if it's in metadata, it's not a column. And vice versa.
if in_meta:
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.colnames # not also a column
else:
assert tbl["cosmology"][0] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.meta
# -----------------------
def test_to_table(self, cosmo_cls, cosmo, to_format):
"""Test cosmology -> astropy.table."""
tbl = to_format("astropy.table")
# Test properties of Table.
assert isinstance(tbl, QTable)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl["name"] == cosmo.name
assert tbl.indices # indexed
# Test each Parameter column has expected information.
for n in cosmo.__parameters__:
P = getattr(cosmo_cls, n) # Parameter
col = tbl[n] # Column
# Compare the two
assert col.info.name == P.name
assert col.info.description == P.__doc__
assert col.info.meta == (cosmo.meta.get(n) or {})
# -----------------------
def test_from_not_table(self, cosmo, from_format):
"""Test not passing a Table to the Table parser."""
with pytest.raises((TypeError, ValueError)):
from_format("NOT A TABLE", format="astropy.table")
def test_tofrom_table_instance(self, cosmo_cls, cosmo, from_format, to_format):
"""Test cosmology -> astropy.table -> cosmology."""
tbl = to_format("astropy.table")
# add information
tbl["mismatching"] = "will error"
# tests are different if the last argument is a **kwarg
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = from_format(tbl, format="astropy.table")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(tbl, format="astropy.table")
# unless mismatched are moved to meta
got = from_format(tbl, format="astropy.table", move_to_meta=True)
assert got == cosmo
assert got.meta["mismatching"] == "will error"
# it won't error if everything matches up
tbl.remove_column("mismatching")
got = from_format(tbl, format="astropy.table")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``to_format``.
tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
got = from_format(tbl, format="astropy.table")
assert got == cosmo
# also it auto-identifies 'format'
got = from_format(tbl)
assert got == cosmo
def test_fromformat_table_subclass_partial_info(
self, cosmo_cls, cosmo, from_format, to_format
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
# test to_format
tbl = to_format("astropy.table")
assert isinstance(tbl, QTable)
# partial information
tbl.meta.pop("cosmology", None)
del tbl["Tcmb0"]
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.from_format(tbl, format="astropy.table")
got2 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls)
got3 = from_format(
tbl, format="astropy.table", cosmology=cosmo_cls.__qualname__
)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
@pytest.mark.parametrize("add_index", [True, False])
    def test_tofrom_table_multirow(self, cosmo_cls, cosmo, from_format, add_index):
"""Test if table has multiple rows."""
# ------------
# To Table
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
tbl = vstack(
[c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts="silent",
)
assert isinstance(tbl, QTable)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl[1]["name"] == cosmo.name
# whether to add an index. `from_format` can work with or without.
if add_index:
tbl.add_index("name", unique=True)
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
from_format(tbl, format="astropy.table")
# unless the index argument is provided
got = from_format(tbl, index=1, format="astropy.table")
assert got == cosmo
# the index can be a string
got = from_format(tbl, index=cosmo.name, format="astropy.table")
assert got == cosmo
# when there's more than one cosmology found
tbls = vstack([tbl, tbl], metadata_conflicts="silent")
with pytest.raises(ValueError, match="more than one"):
from_format(tbls, index=cosmo.name, format="astropy.table")
@pytest.mark.parametrize("format", [True, False, None, "astropy.table"])
def test_is_equivalent_to_table(self, cosmo, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a |Table|.
"""
obj = to_format("astropy.table")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (True if format is not False else False)
class TestToFromTable(ToFromDirectTestBase, ToFromTableTestMixin):
"""Directly test ``to/from_table``."""
def setup_class(self):
self.functions = {"to": to_table, "from": from_table}
|
520ce51cef29b1ff646a16a73d88d0d6ee42aa6a491ee3c8802e60a819c78f0f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology, FlatLambdaCDM, Planck18
from astropy.cosmology import units as cu
from astropy.cosmology.io.yaml import (
from_yaml,
to_yaml,
yaml_constructor,
yaml_representer,
)
from astropy.io.misc.yaml import AstropyDumper, dump, load
from .base import ToFromDirectTestBase, ToFromTestMixinBase
##############################################################################
# Test Serializer
def test_yaml_representer():
"""Test :func:`~astropy.cosmology.io.yaml.yaml_representer`."""
# test function `representer`
representer = yaml_representer("!astropy.cosmology.flrw.LambdaCDM")
assert callable(representer)
# test the normal method of dumping to YAML
yml = dump(Planck18)
assert isinstance(yml, str)
assert yml.startswith("!astropy.cosmology.flrw.FlatLambdaCDM")
def test_yaml_constructor():
"""Test :func:`~astropy.cosmology.io.yaml.yaml_constructor`."""
# test function `constructor`
constructor = yaml_constructor(FlatLambdaCDM)
assert callable(constructor)
# it's too hard to manually construct a node, so we only test dump/load
# this is also a good round-trip test
yml = dump(Planck18)
with u.add_enabled_units(cu): # needed for redshift units
cosmo = load(yml)
assert isinstance(cosmo, FlatLambdaCDM)
assert cosmo == Planck18
assert cosmo.meta == Planck18.meta
##############################################################################
# Test Unified I/O
class ToFromYAMLTestMixin(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="yaml"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
"""
@pytest.fixture
def xfail_if_not_registered_with_yaml(self, cosmo_cls):
"""
YAML I/O only works on registered classes. So the thing to check is
if this class is registered. If not, :func:`pytest.xfail` this test.
Some of the tests define custom cosmologies. They are not registered.
"""
if cosmo_cls not in AstropyDumper.yaml_representers:
pytest.xfail(
f"Cosmologies of type {cosmo_cls} are not registered with YAML."
)
# ===============================================================
def test_to_yaml(self, cosmo, to_format, xfail_if_not_registered_with_yaml):
"""Test cosmology -> YAML."""
yml = to_format("yaml")
assert isinstance(yml, str) # test type
assert yml.startswith("!astropy.cosmology.")
def test_from_yaml_default(
self, cosmo, to_format, from_format, xfail_if_not_registered_with_yaml
):
"""Test cosmology -> YAML -> cosmology."""
yml = to_format("yaml")
got = from_format(yml, format="yaml") # (cannot autoidentify)
assert got.name == cosmo.name
assert got.meta == cosmo.meta
# it won't error if everything matches up
got = from_format(yml, format="yaml")
assert got == cosmo
assert got.meta == cosmo.meta
# auto-identify test moved because it doesn't work.
# see test_from_yaml_autoidentify
def test_from_yaml_autoidentify(
self, cosmo, to_format, from_format, xfail_if_not_registered_with_yaml
):
"""As a non-path string, it does NOT auto-identifies 'format'.
TODO! this says there should be different types of I/O registries.
not just hacking object conversion on top of file I/O.
"""
assert self.can_autodentify("yaml") is False
# Showing the specific error. The str is interpreted as a file location
# but is too long a file name.
yml = to_format("yaml")
with pytest.raises((FileNotFoundError, OSError)): # OSError in Windows
from_format(yml)
# # TODO! this is a challenging test to write. It's also unlikely to happen.
# def test_fromformat_subclass_partial_info_yaml(self, cosmo):
# """
# Test writing from an instance and reading from that class.
# This works with missing information.
# """
# -----------------------------------------------------
@pytest.mark.parametrize("format", [True, False, None])
def test_is_equivalent_to_yaml(
self, cosmo, to_format, format, xfail_if_not_registered_with_yaml
):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a YAML string. YAML can't be identified without "format" specified.
"""
obj = to_format("yaml")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is False
def test_is_equivalent_to_yaml_specify_format(
self, cosmo, to_format, xfail_if_not_registered_with_yaml
):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
Same as ``test_is_equivalent_to_yaml`` but with ``format="yaml"``.
"""
assert cosmo.is_equivalent(to_format("yaml"), format="yaml") is True
class TestToFromYAML(ToFromDirectTestBase, ToFromYAMLTestMixin):
"""
Directly test ``to/from_yaml``.
These are not public API and are discouraged from use, in favor of
``Cosmology.to/from_format(..., format="yaml")``, but should be tested
regardless b/c 3rd party packages might use these in their Cosmology I/O.
Also, it's cheap to test.
"""
def setup_class(self):
"""Set up fixtures to use ``to/from_yaml``, not the I/O abstractions."""
self.functions = {"to": to_yaml, "from": from_yaml}
@pytest.fixture(scope="class", autouse=True)
def setup(self):
"""
Setup and teardown for tests.
This overrides from super because `ToFromDirectTestBase` adds a custom
Cosmology ``CosmologyWithKwargs`` that is not registered with YAML.
"""
yield # run tests
def test_from_yaml_autoidentify(self, cosmo, to_format, from_format):
"""
If directly calling the function there's no auto-identification.
So this overrides the test from `ToFromYAMLTestMixin`
"""
|
1a680db519d81f8b07718c370d8cb63311eadcce6fe05df2fa0eb67a66741163 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test that all expected methods are present, before I/O tests import.
This file is weirdly named so that it's the first test of I/O.
"""
from astropy.cosmology.connect import convert_registry, readwrite_registry
def test_expected_readwrite_io():
"""Test that ONLY the expected I/O is registered."""
got = {k for k, _ in readwrite_registry._readers.keys()}
expected = {"ascii.ecsv", "ascii.html"}
assert got == expected
def test_expected_convert_io():
"""Test that ONLY the expected I/O is registered."""
got = {k for k, _ in convert_registry._readers.keys()}
expected = {
"astropy.cosmology",
"mapping",
"astropy.model",
"astropy.row",
"astropy.table",
"yaml",
}
assert got == expected
|
14193a76190f6e75c6c964e6f533eb450919913def03eae0356bc5fb33dd5c21 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# THIRD PARTY
import pytest
import astropy.units as u
from astropy.cosmology.io.html import _FORMAT_TABLE, read_html_table, write_html_table
from astropy.cosmology.parameter import Parameter
from astropy.table import QTable, Table, vstack
from astropy.units.decorators import NoneType
from astropy.utils.compat.optional_deps import HAS_BS4
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
class ReadWriteHTMLTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="ascii.html"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_bad_index(self, read, write, tmp_path):
"""Test if argument ``index`` is incorrect"""
fp = tmp_path / "test_to_html_table_bad_index.html"
write(fp, format="ascii.html")
# single-row table and has a non-0/None index
with pytest.raises(IndexError, match="index 2 out of range"):
read(fp, index=2, format="ascii.html")
# string index where doesn't match
with pytest.raises(KeyError, match="No matches found for key"):
read(fp, index="row 0", format="ascii.html")
# -----------------------
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_failed_cls(self, write, tmp_path):
"""Test failed table type."""
fp = tmp_path / "test_to_html_table_failed_cls.html"
with pytest.raises(TypeError, match="'cls' must be"):
write(fp, format="ascii.html", cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_cls(self, write, tbl_cls, tmp_path):
fp = tmp_path / "test_to_html_table_cls.html"
write(fp, format="ascii.html", cls=tbl_cls)
# -----------------------
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_readwrite_html_table_instance(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""Test cosmology -> ascii.html -> cosmology."""
fp = tmp_path / "test_readwrite_html_table_instance.html"
# ------------
# To Table
write(fp, format="ascii.html")
# some checks on the saved file
tbl = QTable.read(fp)
# assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ # metadata read not implemented
assert tbl["name"] == cosmo.name
# ------------
# From Table
tbl["mismatching"] = "will error"
tbl.write(fp, format="ascii.html", overwrite=True)
# tests are different if the last argument is a **kwarg
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = read(fp, format="ascii.html")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
# assert "mismatching" not in got.meta # metadata read not implemented
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
read(fp, format="ascii.html")
# unless mismatched are moved to meta
got = read(fp, format="ascii.html", move_to_meta=True)
assert got == cosmo
# assert got.meta["mismatching"] == "will error" # metadata read not implemented
# it won't error if everything matches up
tbl.remove_column("mismatching")
tbl.write(fp, format="ascii.html", overwrite=True)
got = read(fp, format="ascii.html")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``write``.
# tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]] #
# metadata read not implemented
got = read(fp, format="ascii.html")
assert got == cosmo
got = read(fp)
assert got == cosmo
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_rename_html_table_columns(self, read, write, tmp_path):
"""Tests renaming columns"""
fp = tmp_path / "test_rename_html_table_columns.html"
write(fp, format="ascii.html", latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
# For now, Cosmology class and name are stored in first 2 slots
for column_name in tbl.colnames[2:]:
assert column_name in _FORMAT_TABLE.values()
cosmo = read(fp, format="ascii.html")
converted_tbl = cosmo.to_format("astropy.table")
# asserts each column name has been reverted
# cosmology name is still stored in first slot
for column_name in converted_tbl.colnames[1:]:
assert column_name in _FORMAT_TABLE.keys()
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
@pytest.mark.parametrize("latex_names", [True, False])
def test_readwrite_html_subclass_partial_info(
self, cosmo_cls, cosmo, read, write, latex_names, tmp_path, add_cu
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_read_html_subclass_partial_info.html"
# test write
write(fp, format="ascii.html", latex_names=latex_names)
# partial information
tbl = QTable.read(fp)
# tbl.meta.pop("cosmology", None) # metadata not implemented
cname = "$$T_{0}$$" if latex_names else "Tcmb0"
del tbl[cname] # format is not converted to original units
tbl.write(fp, overwrite=True)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(fp, format="ascii.html")
got2 = read(fp, format="ascii.html", cosmology=cosmo_cls)
got3 = read(fp, format="ascii.html", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
# assert got.meta == cosmo.meta # metadata read not implemented
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
    def test_readwrite_html_multirow(self, cosmo, read, write, tmp_path, add_cu):
"""Test if table has multiple rows."""
        fp = tmp_path / "test_readwrite_html_multirow.html"
# Make
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
table = vstack(
[c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts="silent",
)
cosmo_cls = type(cosmo)
        assert cosmo_cls is not NoneType
for n, col in zip(table.colnames, table.itercols()):
if n == "cosmology":
continue
param = getattr(cosmo_cls, n)
if not isinstance(param, Parameter) or param.unit in (None, u.one):
continue
# Replace column with unitless version
table.replace_column(n, (col << param.unit).value, copy=False)
table.write(fp, format="ascii.html")
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
read(fp, format="ascii.html")
# unless the index argument is provided
got = cosmo_cls.read(fp, index=1, format="ascii.html")
# got = read(fp, index=1, format="ascii.html")
assert got == cosmo
# the index can be a string
got = cosmo_cls.read(fp, index=cosmo.name, format="ascii.html")
assert got == cosmo
# it's better if the table already has an index
# this will be identical to the previous ``got``
table.add_index("name")
got2 = cosmo_cls.read(fp, index=cosmo.name, format="ascii.html")
assert got2 == cosmo
class TestReadWriteHTML(ReadWriteDirectTestBase, ReadWriteHTMLTestMixin):
"""
Directly test ``read/write_html``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="ascii.html")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_html_table, "write": write_html_table}
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_rename_direct_html_table_columns(self, read, write, tmp_path):
"""Tests renaming columns"""
fp = tmp_path / "test_rename_html_table_columns.html"
write(fp, format="ascii.html", latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
for column_name in tbl.colnames[2:]:
            # for now, the Cosmology class and name are stored in the first 2 slots
assert column_name in _FORMAT_TABLE.values()
cosmo = read(fp, format="ascii.html")
converted_tbl = cosmo.to_format("astropy.table")
# asserts each column name has been reverted
for column_name in converted_tbl.colnames[1:]:
            # for now, the cosmology name is still stored in the first slot
assert column_name in _FORMAT_TABLE.keys()
|
017a5d0240b6ed9a0d847f9c7dd48becd68b19766d5c078794a5b4e60bf81b91 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# THIRD PARTY
import pytest
# LOCAL
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.io.ecsv import read_ecsv, write_ecsv
from astropy.table import QTable, Table, vstack
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
class ReadWriteECSVTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="ascii.ecsv"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_ecsv_bad_index(self, read, write, tmp_path):
"""Test if argument ``index`` is incorrect"""
fp = tmp_path / "test_to_ecsv_bad_index.ecsv"
write(fp, format="ascii.ecsv")
# single-row table and has a non-0/None index
with pytest.raises(IndexError, match="index 2 out of range"):
read(fp, index=2, format="ascii.ecsv")
# string index where doesn't match
with pytest.raises(KeyError, match="No matches found for key"):
read(fp, index="row 0", format="ascii.ecsv")
# -----------------------
def test_to_ecsv_failed_cls(self, write, tmp_path):
"""Test failed table type."""
fp = tmp_path / "test_to_ecsv_failed_cls.ecsv"
with pytest.raises(TypeError, match="'cls' must be"):
write(fp, format="ascii.ecsv", cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
def test_to_ecsv_cls(self, write, tbl_cls, tmp_path):
fp = tmp_path / "test_to_ecsv_cls.ecsv"
write(fp, format="ascii.ecsv", cls=tbl_cls)
# -----------------------
@pytest.mark.parametrize("in_meta", [True, False])
def test_to_ecsv_in_meta(self, cosmo_cls, write, in_meta, tmp_path, add_cu):
"""Test where the cosmology class is placed."""
fp = tmp_path / "test_to_ecsv_in_meta.ecsv"
write(fp, format="ascii.ecsv", cosmology_in_meta=in_meta)
# if it's in metadata, it's not a column. And vice versa.
tbl = QTable.read(fp)
if in_meta:
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.colnames # not also a column
else:
assert tbl["cosmology"][0] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.meta
# -----------------------
def test_readwrite_ecsv_instance(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""Test cosmology -> ascii.ecsv -> cosmology."""
fp = tmp_path / "test_readwrite_ecsv_instance.ecsv"
# ------------
# To Table
write(fp, format="ascii.ecsv")
# some checks on the saved file
tbl = QTable.read(fp)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl["name"] == cosmo.name
# ------------
# From Table
tbl["mismatching"] = "will error"
tbl.write(fp, format="ascii.ecsv", overwrite=True)
        # tests are different if the last parameter is a **kwargs catch-all
        # (kind 4 is ``inspect.Parameter.VAR_KEYWORD``)
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = read(fp, format="ascii.ecsv")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
read(fp, format="ascii.ecsv")
# unless mismatched are moved to meta
got = read(fp, format="ascii.ecsv", move_to_meta=True)
assert got == cosmo
assert got.meta["mismatching"] == "will error"
# it won't error if everything matches up
tbl.remove_column("mismatching")
tbl.write(fp, format="ascii.ecsv", overwrite=True)
got = read(fp, format="ascii.ecsv")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``write``.
tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
got = read(fp, format="ascii.ecsv")
assert got == cosmo
# also it auto-identifies 'format'
got = read(fp)
assert got == cosmo
def test_readwrite_ecsv_subclass_partial_info(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_read_ecsv_subclass_partial_info.ecsv"
# test write
write(fp, format="ascii.ecsv")
# partial information
tbl = QTable.read(fp)
tbl.meta.pop("cosmology", None)
del tbl["Tcmb0"]
tbl.write(fp, overwrite=True)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(fp, format="ascii.ecsv")
got2 = read(fp, format="ascii.ecsv", cosmology=cosmo_cls)
got3 = read(fp, format="ascii.ecsv", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
    def test_readwrite_ecsv_multirow(self, cosmo, read, write, tmp_path, add_cu):
"""Test if table has multiple rows."""
fp = tmp_path / "test_readwrite_ecsv_mutlirow.ecsv"
# Make
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
tbl = vstack(
[c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts="silent",
)
tbl.write(fp, format="ascii.ecsv")
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
read(fp, format="ascii.ecsv")
# unless the index argument is provided
got = read(fp, index=1, format="ascii.ecsv")
assert got == cosmo
# the index can be a string
got = read(fp, index=cosmo.name, format="ascii.ecsv")
assert got == cosmo
# it's better if the table already has an index
# this will be identical to the previous ``got``
tbl.add_index("name")
got2 = read(fp, index=cosmo.name, format="ascii.ecsv")
assert got2 == cosmo
class TestReadWriteECSV(ReadWriteDirectTestBase, ReadWriteECSVTestMixin):
"""
Directly test ``read/write_ecsv``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="ascii.ecsv")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_ecsv, "write": write_ecsv}
|
faeb8a095203305a4ccfd85401baf11e9a791ee02de2e6c79bb9dec955fa0a93 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology, Parameter, realizations
from astropy.cosmology import units as cu
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.realizations import available
cosmo_instances = [getattr(realizations, name) for name in available]
##############################################################################
class IOTestBase:
"""Base class for Cosmology I/O tests.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
class ToFromTestMixinBase(IOTestBase):
"""Tests for a Cosmology[To/From]Format with some ``format``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.fixture(scope="class")
def from_format(self):
"""Convert to Cosmology using ``Cosmology.from_format()``."""
return Cosmology.from_format
@pytest.fixture(scope="class")
def to_format(self, cosmo):
"""Convert Cosmology instance using ``.to_format()``."""
return cosmo.to_format
def can_autodentify(self, format):
"""Check whether a format can auto-identify."""
return format in Cosmology.from_format.registry._identifiers
class ReadWriteTestMixinBase(IOTestBase):
"""Tests for a Cosmology[Read/Write].
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.fixture(scope="class")
def read(self):
"""Read Cosmology instance using ``Cosmology.read()``."""
return Cosmology.read
@pytest.fixture(scope="class")
def write(self, cosmo):
"""Write Cosmology using ``.write()``."""
return cosmo.write
@pytest.fixture
def add_cu(self):
"""Add :mod:`astropy.cosmology.units` to the enabled units."""
# TODO! autoenable 'cu' if cosmology is imported?
with u.add_enabled_units(cu):
yield
##############################################################################
class IODirectTestBase(IOTestBase):
"""Directly test Cosmology I/O functions.
These functions are not public API and are discouraged from public use, in
favor of the I/O methods on |Cosmology|. They are tested b/c they are used
internally and because some tests for the methods on |Cosmology| don't need
to be run in the |Cosmology| class's large test matrix.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass.
"""
@pytest.fixture(scope="class", autouse=True)
def setup(self):
"""Setup and teardown for tests."""
class CosmologyWithKwargs(Cosmology):
Tcmb0 = Parameter(unit=u.K)
def __init__(
self, Tcmb0=0, name="cosmology with kwargs", meta=None, **kwargs
):
super().__init__(name=name, meta=meta)
self._Tcmb0 = Tcmb0 << u.K
yield # run tests
# pop CosmologyWithKwargs from registered classes
# but don't error b/c it can fail in parallel
_COSMOLOGY_CLASSES.pop(CosmologyWithKwargs.__qualname__, None)
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
"""Cosmology instance."""
if isinstance(request.param, str): # CosmologyWithKwargs
return _COSMOLOGY_CLASSES[request.param](Tcmb0=3)
return request.param
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
"""Cosmology classes."""
return cosmo.__class__
class ToFromDirectTestBase(IODirectTestBase, ToFromTestMixinBase):
"""Directly test ``to/from_<format>``.
These functions are not public API and are discouraged from public use, in
favor of ``Cosmology.to/from_format(..., format="<format>")``. They are
tested because they are used internally and because some tests for the
methods on |Cosmology| don't need to be run in the |Cosmology| class's
large test matrix.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass.
Subclasses should have an attribute ``functions`` which is a dictionary
containing two items: ``"to"=<function for to_format>`` and
``"from"=<function for from_format>``.
"""
@pytest.fixture(scope="class")
def from_format(self):
"""Convert to Cosmology using function ``from``."""
def use_from_format(*args, **kwargs):
kwargs.pop("format", None) # specific to Cosmology.from_format
return self.functions["from"](*args, **kwargs)
return use_from_format
@pytest.fixture(scope="class")
def to_format(self, cosmo):
"""Convert Cosmology to format using function ``to``."""
def use_to_format(*args, **kwargs):
return self.functions["to"](cosmo, *args, **kwargs)
return use_to_format
class ReadWriteDirectTestBase(IODirectTestBase, ToFromTestMixinBase):
"""Directly test ``read/write_<format>``.
These functions are not public API and are discouraged from public use, in
favor of ``Cosmology.read/write(..., format="<format>")``. They are tested
because they are used internally and because some tests for the
methods on |Cosmology| don't need to be run in the |Cosmology| class's
large test matrix.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass.
Subclasses should have an attribute ``functions`` which is a dictionary
containing two items: ``"read"=<function for read>`` and
``"write"=<function for write>``.
"""
@pytest.fixture(scope="class")
def read(self):
"""Read Cosmology from file using function ``read``."""
def use_read(*args, **kwargs):
kwargs.pop("format", None) # specific to Cosmology.from_format
return self.functions["read"](*args, **kwargs)
return use_read
@pytest.fixture(scope="class")
def write(self, cosmo):
"""Write Cosmology to file using function ``write``."""
def use_write(*args, **kwargs):
return self.functions["write"](cosmo, *args, **kwargs)
return use_write
|
9dfbd1e822a4cd8ccaab8a342e76165f2ab1031cd522a0ad28d1bc57b1086d60 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# THIRD PARTY
import pytest
# LOCAL
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
from astropy.cosmology.io.row import from_row, to_row
from astropy.table import Row
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromRowTestMixin(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="astropy.row"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
"""
@pytest.mark.parametrize("in_meta", [True, False])
def test_to_row_in_meta(self, cosmo_cls, cosmo, in_meta):
"""Test where the cosmology class is placed."""
row = cosmo.to_format("astropy.row", cosmology_in_meta=in_meta)
# if it's in metadata, it's not a column. And vice versa.
if in_meta:
assert row.meta["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in row.colnames # not also a column
else:
assert row["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in row.meta
# -----------------------
def test_from_not_row(self, cosmo, from_format):
"""Test not passing a Row to the Row parser."""
with pytest.raises(AttributeError):
from_format("NOT A ROW", format="astropy.row")
def test_tofrom_row_instance(self, cosmo, to_format, from_format):
"""Test cosmology -> astropy.row -> cosmology."""
# ------------
# To Row
row = to_format("astropy.row")
assert isinstance(row, Row)
assert row["cosmology"] == cosmo.__class__.__qualname__
assert row["name"] == cosmo.name
# ------------
# From Row
row.table["mismatching"] = "will error"
        # tests are different if the last parameter is a **kwargs catch-all
        # (kind 4 is ``inspect.Parameter.VAR_KEYWORD``)
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = from_format(row, format="astropy.row")
assert got.__class__ is cosmo.__class__
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(row, format="astropy.row")
# unless mismatched are moved to meta
got = from_format(row, format="astropy.row", move_to_meta=True)
assert got == cosmo
assert got.meta["mismatching"] == "will error"
# it won't error if everything matches up
row.table.remove_column("mismatching")
got = from_format(row, format="astropy.row")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``to_format``.
cosmology = _COSMOLOGY_CLASSES[row["cosmology"]]
row.table.remove_column("cosmology")
row.table["cosmology"] = cosmology
got = from_format(row, format="astropy.row")
assert got == cosmo
# also it auto-identifies 'format'
got = from_format(row)
assert got == cosmo
def test_fromformat_row_subclass_partial_info(self, cosmo):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
pass # there are no partial info options
@pytest.mark.parametrize("format", [True, False, None, "astropy.row"])
def test_is_equivalent_to_row(self, cosmo, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a Row.
"""
obj = to_format("astropy.row")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (True if format is not False else False)
class TestToFromRow(ToFromDirectTestBase, ToFromRowTestMixin):
"""
Directly test ``to/from_row``.
These are not public API and are discouraged from use, in favor of
``Cosmology.to/from_format(..., format="astropy.row")``, but should be
tested regardless b/c 3rd party packages might use these in their Cosmology
I/O. Also, it's cheap to test.
"""
def setup_class(self):
self.functions = {"to": to_row, "from": from_row}
|
99c793c14478505d3cff69e602a2f1397ff3f31b4133237cb9c4700507d0de24 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import json
import os
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import units as cu
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
def read_json(filename, **kwargs):
"""Read JSON.
Parameters
----------
filename : str
**kwargs
Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format`
Returns
-------
`~astropy.cosmology.Cosmology` instance
"""
# read
if isinstance(filename, (str, bytes, os.PathLike)):
with open(filename) as file:
data = file.read()
    else:  # file-like object: read from it directly
data = filename.read()
mapping = json.loads(data) # parse json mappable to dict
# deserialize Quantity
with u.add_enabled_units(cu.redshift):
for k, v in mapping.items():
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping[k] = u.Quantity(v["value"], v["unit"])
for k, v in mapping.get("meta", {}).items(): # also the metadata
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping["meta"][k] = u.Quantity(v["value"], v["unit"])
return Cosmology.from_format(mapping, format="mapping", **kwargs)
def write_json(cosmology, file, *, overwrite=False):
"""Write Cosmology to JSON.
Parameters
----------
cosmology : `astropy.cosmology.Cosmology` subclass instance
file : path-like or file-like
overwrite : bool (optional, keyword-only)
"""
data = cosmology.to_format("mapping") # start by turning into dict
data["cosmology"] = data["cosmology"].__qualname__
# serialize Quantity
for k, v in data.items():
if isinstance(v, u.Quantity):
data[k] = {"value": v.value.tolist(), "unit": str(v.unit)}
for k, v in data.get("meta", {}).items(): # also serialize the metadata
if isinstance(v, u.Quantity):
data["meta"][k] = {"value": v.value.tolist(), "unit": str(v.unit)}
# check that file exists and whether to overwrite.
if os.path.exists(file) and not overwrite:
raise OSError(f"{file} exists. Set 'overwrite' to write over.")
with open(file, "w") as write_file:
json.dump(data, write_file)
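# Illustrative round trip with the helpers above (the path is hypothetical and
# ``Planck18`` would need to be imported):
#
#     write_json(Planck18, "planck18.json")
#     assert read_json("planck18.json") == Planck18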
def json_identify(origin, filepath, fileobj, *args, **kwargs):
return filepath is not None and filepath.endswith(".json")
###############################################################################
class ReadWriteJSONTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="json"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.fixture(scope="class", autouse=True)
def register_and_unregister_json(self):
"""Setup & teardown for JSON read/write tests."""
# Register
readwrite_registry.register_reader("json", Cosmology, read_json, force=True)
readwrite_registry.register_writer("json", Cosmology, write_json, force=True)
readwrite_registry.register_identifier(
"json", Cosmology, json_identify, force=True
)
yield # Run all tests in class
# Unregister
readwrite_registry.unregister_reader("json", Cosmology)
readwrite_registry.unregister_writer("json", Cosmology)
readwrite_registry.unregister_identifier("json", Cosmology)
# ========================================================================
def test_readwrite_json_subclass_partial_info(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_readwrite_json_subclass_partial_info.json"
# test write
cosmo.write(fp, format="json")
# partial information
with open(fp) as file:
L = file.readlines()[0]
L = (
L[: L.index('"cosmology":')] + L[L.index(", ") + 2 :]
        )  # drop the "cosmology" entry
i = L.index('"Tcmb0":') # delete Tcmb0
L = (
L[:i] + L[L.index(", ", L.index(", ", i) + 1) + 2 :]
        )  # drop "Tcmb0" (2nd ", " because the Quantity dict has a comma)
tempfname = tmp_path / f"{cosmo.name}_temp.json"
with open(tempfname, "w") as file:
file.writelines([L])
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(tempfname, format="json")
got2 = read(tempfname, format="json", cosmology=cosmo_cls)
got3 = read(tempfname, format="json", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
class TestReadWriteJSON(ReadWriteDirectTestBase, ReadWriteJSONTestMixin):
"""
Directly test ``read/write_json``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="json")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_json, "write": write_json}
|
bd21fd7e2836f7e2acfb95feceec5f60a25b5cc1b78b4724a619d0bdc5c9bde5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.wpwazpcdm`."""
##############################################################################
# IMPORTS
# STDLIB
# THIRD PARTY
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import wpwaCDM
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.tests.test_core import ParameterTestMixin
from .test_base import FLRWSubclassTest
from .test_w0wacdm import ParameterwaTestMixin
##############################################################################
# TESTS
##############################################################################
class ParameterwpTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` wp on a Cosmology.
    wp is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_wp(self, cosmo_cls, cosmo):
"""Test Parameter ``wp``."""
# on the class
assert isinstance(cosmo_cls.wp, Parameter)
assert "at the pivot" in cosmo_cls.wp.__doc__
assert cosmo_cls.wp.unit is None
# on the instance
assert cosmo.wp is cosmo._wp
assert cosmo.wp == self.cls_kwargs["wp"]
def test_init_wp(self, cosmo_cls, ba):
"""Test initialization for values of ``wp``."""
# test that it works with units
ba.arguments["wp"] = ba.arguments["wp"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wp == ba.arguments["wp"]
# also without units
ba.arguments["wp"] = ba.arguments["wp"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wp == ba.arguments["wp"]
# must be dimensionless
ba.arguments["wp"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterzpTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` zp on a Cosmology.
    zp is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_zp(self, cosmo_cls, cosmo):
"""Test Parameter ``zp``."""
# on the class
assert isinstance(cosmo_cls.zp, Parameter)
assert "pivot redshift" in cosmo_cls.zp.__doc__
assert cosmo_cls.zp.unit == cu.redshift
# on the instance
assert cosmo.zp is cosmo._zp
assert cosmo.zp == self.cls_kwargs["zp"] << cu.redshift
def test_init_zp(self, cosmo_cls, ba):
"""Test initialization for values of ``zp``."""
# test that it works with units
ba.arguments["zp"] = ba.arguments["zp"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.zp == ba.arguments["zp"]
# also without units
ba.arguments["zp"] = ba.arguments["zp"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.zp.value == ba.arguments["zp"]
# must be dimensionless
ba.arguments["zp"] = 10 * u.km
with pytest.raises(u.UnitConversionError):
cosmo_cls(*ba.args, **ba.kwargs)
class TestwpwaCDM(
FLRWSubclassTest, ParameterwpTestMixin, ParameterwaTestMixin, ParameterzpTestMixin
):
"""Test :class:`astropy.cosmology.wpwaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = wpwaCDM
self.cls_kwargs.update(wp=-0.9, wa=0.2, zp=0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(wp=0.1, wa=0.2, zp=14)
assert c.wp == 0.1
assert c.wa == 0.2
assert c.zp == 14
for n in set(cosmo.__parameters__) - {"wp", "wa", "zp"}:
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(
v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)
)
# @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below
def test_w(self, cosmo):
"""Test :meth:`astropy.cosmology.wpwaCDM.w`."""
# super().test_w(cosmo, z)
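        # The expected values follow the pivot form of the CPL parametrization,
        # w(z) = wp + wa * (a_p - a) with a = 1 / (1 + z) and a_p = 1 / (1 + zp);
        # here wp=-0.9, wa=0.2, zp=0.5.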
assert u.allclose(cosmo.w(0.5), -0.9)
assert u.allclose(
cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.94848485, -0.93333333, -0.9, -0.84666667, -0.82380952, -0.78266667],
)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'wpwaCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,'
" Ode0=0.73, wp=-0.9, wa=0.2, zp=0.5 redshift, Tcmb0=3.0 K,"
" Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.03)"
)
assert repr(cosmo) == expected
|
bc3a74f3b0264552e9c89419d20b48889840851799ca51ef4055e05d457b1332 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.lambdacdm`."""
##############################################################################
# IMPORTS
# STDLIB
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import FlatLambdaCDM, LambdaCDM
from astropy.cosmology.flrw.lambdacdm import ellipkinc, hyp2f1
from astropy.cosmology.tests.helper import get_redshift_methods
from astropy.cosmology.tests.test_core import invalid_zs, valid_zs
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .test_base import FlatFLRWMixinTest, FLRWSubclassTest
##############################################################################
# TESTS
##############################################################################
@pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed")
def test_optional_deps_functions():
"""Test stand-in functions when optional dependencies not installed."""
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"):
ellipkinc()
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"):
hyp2f1()
##############################################################################
class TestLambdaCDM(FLRWSubclassTest):
"""Test :class:`astropy.cosmology.LambdaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = LambdaCDM
# ===============================================================
# Method & Attribute Tests
_FLRW_redshift_methods = get_redshift_methods(
LambdaCDM, include_private=True, include_z2=False
) - {"_dS_age"}
# `_dS_age` is removed because it doesn't strictly rely on the value of `z`,
# so any input that doesn't trip up ``np.shape`` is "valid"
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize("method", _FLRW_redshift_methods)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
@pytest.mark.parametrize("z", valid_zs)
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.LambdaCDM.w`."""
super().test_w(cosmo, z)
w = cosmo.w(z)
assert u.allclose(w, -1.0)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'LambdaCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,'
" Ode0=0.73, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV,"
" Ob0=0.03)"
)
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class TestFlatLambdaCDM(FlatFLRWMixinTest, TestLambdaCDM):
"""Test :class:`astropy.cosmology.FlatLambdaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = FlatLambdaCDM
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize("method", TestLambdaCDM._FLRW_redshift_methods - {"Otot"})
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
# ===============================================================
# Method & Attribute Tests
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'FlatLambdaCDM(name="ABCMeta", H0=70.0 km / (Mpc s),'
" Om0=0.27, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV,"
" Ob0=0.03)"
)
assert repr(cosmo) == expected
|
065deff241cfd642fd8530da6cee59781f432b8bd9208fcf03d957562c6a8dbf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.base`."""
##############################################################################
# IMPORTS
# STDLIB
import abc
import copy
# THIRD PARTY
import numpy as np
import pytest
import astropy.constants as const
# LOCAL
import astropy.units as u
from astropy.cosmology import FLRW, FlatLambdaCDM, LambdaCDM, Planck18
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.flrw.base import _a_B_c2, _critdens_const, _H0units_to_invs, quad
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.tests.helper import get_redshift_methods
from astropy.cosmology.tests.test_core import CosmologySubclassTest as CosmologyTest
from astropy.cosmology.tests.test_core import (
FlatCosmologyMixinTest,
ParameterTestMixin,
invalid_zs,
valid_zs,
)
from astropy.utils.compat.optional_deps import HAS_SCIPY
##############################################################################
# SETUP / TEARDOWN
class SubFLRW(FLRW):
def w(self, z):
return super().w(z)
##############################################################################
# TESTS
##############################################################################
@pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed")
def test_optional_deps_functions():
"""Test stand-in functions when optional dependencies not installed."""
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.integrate'"):
quad()
##############################################################################
class ParameterH0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` H0 on a Cosmology.
    H0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_H0(self, cosmo_cls, cosmo):
"""Test Parameter ``H0``."""
unit = u.Unit("km/(s Mpc)")
# on the class
assert isinstance(cosmo_cls.H0, Parameter)
assert "Hubble constant" in cosmo_cls.H0.__doc__
assert cosmo_cls.H0.unit == unit
# validation
assert cosmo_cls.H0.validate(cosmo, 1) == 1 * unit
assert cosmo_cls.H0.validate(cosmo, 10 * unit) == 10 * unit
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls.H0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.H0 is cosmo._H0
assert cosmo.H0 == self._cls_args["H0"]
assert isinstance(cosmo.H0, u.Quantity) and cosmo.H0.unit == unit
def test_init_H0(self, cosmo_cls, ba):
"""Test initialization for values of ``H0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0 == ba.arguments["H0"]
# also without units
ba.arguments["H0"] = ba.arguments["H0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0.value == ba.arguments["H0"]
# fails for non-scalar
ba.arguments["H0"] = u.Quantity([70, 100], u.km / u.s / u.Mpc)
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterOm0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Om0 on a Cosmology.
    Om0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Om0(self, cosmo_cls, cosmo):
"""Test Parameter ``Om0``."""
# on the class
assert isinstance(cosmo_cls.Om0, Parameter)
assert "Omega matter" in cosmo_cls.Om0.__doc__
# validation
assert cosmo_cls.Om0.validate(cosmo, 1) == 1
assert cosmo_cls.Om0.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Om0 cannot be negative"):
cosmo_cls.Om0.validate(cosmo, -1)
# on the instance
assert cosmo.Om0 is cosmo._Om0
assert cosmo.Om0 == self._cls_args["Om0"]
assert isinstance(cosmo.Om0, float)
def test_init_Om0(self, cosmo_cls, ba):
"""Test initialization for values of ``Om0``."""
# test that it works with units
ba.arguments["Om0"] = ba.arguments["Om0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Om0 == ba.arguments["Om0"]
# also without units
ba.arguments["Om0"] = ba.arguments["Om0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Om0 == ba.arguments["Om0"]
# fails for negative numbers
ba.arguments["Om0"] = -0.27
with pytest.raises(ValueError, match="Om0 cannot be negative."):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterOde0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a Cosmology.
    Ode0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls):
"""Test Parameter ``Ode0`` on the class."""
assert isinstance(cosmo_cls.Ode0, Parameter)
assert "Omega dark energy" in cosmo_cls.Ode0.__doc__
def test_Parameter_Ode0_validation(self, cosmo_cls, cosmo):
"""Test Parameter ``Ode0`` validation."""
assert cosmo_cls.Ode0.validate(cosmo, 1.1) == 1.1
assert cosmo_cls.Ode0.validate(cosmo, 10 * u.one) == 10.0
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls.Ode0.validate(cosmo, 10 * u.km)
def test_Ode0(self, cosmo):
"""Test Parameter ``Ode0`` validation."""
# if Ode0 is a parameter, test its value
assert cosmo.Ode0 is cosmo._Ode0
assert cosmo.Ode0 == self._cls_args["Ode0"]
assert isinstance(cosmo.Ode0, float)
def test_init_Ode0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ode0``."""
# test that it works with units
ba.arguments["Ode0"] = ba.arguments["Ode0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# also without units
ba.arguments["Ode0"] = ba.arguments["Ode0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# Setting param to 0 respects that. Note this test uses ``Ode()``.
ba.arguments["Ode0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert u.allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Ode(1), 0)
# Must be dimensionless or have no units. Errors otherwise.
ba.arguments["Ode0"] = 10 * u.km
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterTcmb0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Tcmb0 on a Cosmology.
    Tcmb0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Tcmb0(self, cosmo_cls, cosmo):
"""Test Parameter ``Tcmb0``."""
# on the class
assert isinstance(cosmo_cls.Tcmb0, Parameter)
assert "Temperature of the CMB" in cosmo_cls.Tcmb0.__doc__
assert cosmo_cls.Tcmb0.unit == u.K
# validation
assert cosmo_cls.Tcmb0.validate(cosmo, 1) == 1 * u.K
assert cosmo_cls.Tcmb0.validate(cosmo, 10 * u.K) == 10 * u.K
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls.Tcmb0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.Tcmb0 is cosmo._Tcmb0
assert cosmo.Tcmb0 == self.cls_kwargs["Tcmb0"]
assert isinstance(cosmo.Tcmb0, u.Quantity) and cosmo.Tcmb0.unit == u.K
def test_init_Tcmb0(self, cosmo_cls, ba):
"""Test initialization for values of ``Tcmb0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0 == ba.arguments["Tcmb0"]
# also without units
ba.arguments["Tcmb0"] = ba.arguments["Tcmb0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0.value == ba.arguments["Tcmb0"]
# must be a scalar
ba.arguments["Tcmb0"] = u.Quantity([0.0, 2], u.K)
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterNeffTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Neff on a Cosmology.
    Neff is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Neff(self, cosmo_cls, cosmo):
"""Test Parameter ``Neff``."""
# on the class
assert isinstance(cosmo_cls.Neff, Parameter)
assert "Number of effective neutrino species" in cosmo_cls.Neff.__doc__
# validation
assert cosmo_cls.Neff.validate(cosmo, 1) == 1
assert cosmo_cls.Neff.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Neff cannot be negative"):
cosmo_cls.Neff.validate(cosmo, -1)
# on the instance
assert cosmo.Neff is cosmo._Neff
assert cosmo.Neff == self.cls_kwargs.get("Neff", 3.04)
assert isinstance(cosmo.Neff, float)
def test_init_Neff(self, cosmo_cls, ba):
"""Test initialization for values of ``Neff``."""
# test that it works with units
ba.arguments["Neff"] = ba.arguments["Neff"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
# also without units
ba.arguments["Neff"] = ba.arguments["Neff"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
ba.arguments["Neff"] = -1
with pytest.raises(ValueError):
cosmo_cls(*ba.args, **ba.kwargs)
class Parameterm_nuTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` m_nu on a Cosmology.
    m_nu is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_m_nu(self, cosmo_cls, cosmo):
"""Test Parameter ``m_nu``."""
# on the class
assert isinstance(cosmo_cls.m_nu, Parameter)
assert "Mass of neutrino species" in cosmo_cls.m_nu.__doc__
assert cosmo_cls.m_nu.unit == u.eV
assert cosmo_cls.m_nu.equivalencies == u.mass_energy()
# on the instance
# assert cosmo.m_nu is cosmo._m_nu
assert u.allclose(cosmo.m_nu, [0.0, 0.0, 0.0] * u.eV)
# set differently depending on the other inputs
if cosmo.Tnu0.value == 0:
assert cosmo.m_nu is None
elif not cosmo._massivenu: # only massless
assert u.allclose(cosmo.m_nu, 0 * u.eV)
elif self._nmasslessnu == 0: # only massive
assert cosmo.m_nu == cosmo._massivenu_mass
else: # a mix -- the most complicated case
assert u.allclose(cosmo.m_nu[: self._nmasslessnu], 0 * u.eV)
assert u.allclose(cosmo.m_nu[self._nmasslessnu], cosmo._massivenu_mass)
def test_init_m_nu(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu``.
Note this requires the class to have a property ``has_massive_nu``.
"""
# Test that it works when m_nu has units.
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert np.all(cosmo.m_nu == ba.arguments["m_nu"]) # (& checks len, unit)
assert not cosmo.has_massive_nu
assert cosmo.m_nu.unit == u.eV # explicitly check unit once.
# And it works when m_nu doesn't have units.
ba.arguments["m_nu"] = ba.arguments["m_nu"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert np.all(cosmo.m_nu.value == ba.arguments["m_nu"])
assert not cosmo.has_massive_nu
# A negative m_nu raises an exception.
tba = copy.copy(ba)
tba.arguments["m_nu"] = u.Quantity([-0.3, 0.2, 0.1], u.eV)
with pytest.raises(ValueError, match="invalid"):
cosmo_cls(*tba.args, **tba.kwargs)
def test_init_m_nu_and_Neff(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu`` and ``Neff``.
Note this test requires ``Neff`` as constructor input, and a property
``has_massive_nu``.
"""
# Mismatch with Neff = wrong number of neutrinos
tba = copy.copy(ba)
tba.arguments["Neff"] = 4.05
tba.arguments["m_nu"] = u.Quantity([0.15, 0.2, 0.1], u.eV)
with pytest.raises(ValueError, match="unexpected number of neutrino"):
cosmo_cls(*tba.args, **tba.kwargs)
# No neutrinos, but Neff
tba.arguments["m_nu"] = 0
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert not cosmo.has_massive_nu
assert len(cosmo.m_nu) == 4
assert cosmo.m_nu.unit == u.eV
assert u.allclose(cosmo.m_nu, 0 * u.eV)
        # TODO! move this test when ``test_nu_relative_density`` is created
assert u.allclose(
cosmo.nu_relative_density(1.0), 0.22710731766 * 4.05, rtol=1e-6
)
# All massive neutrinos case, len from Neff
tba.arguments["m_nu"] = 0.1 * u.eV
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.has_massive_nu
assert len(cosmo.m_nu) == 4
assert cosmo.m_nu.unit == u.eV
assert u.allclose(cosmo.m_nu, [0.1, 0.1, 0.1, 0.1] * u.eV)
def test_init_m_nu_override_by_Tcmb0(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu``.
Note this test requires ``Tcmb0`` as constructor input, and a property
``has_massive_nu``.
"""
# If Neff = 0, m_nu is None.
tba = copy.copy(ba)
tba.arguments["Neff"] = 0
        cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.m_nu is None
assert not cosmo.has_massive_nu
# If Tcmb0 = 0, m_nu is None
tba = copy.copy(ba)
tba.arguments["Tcmb0"] = 0
        cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.m_nu is None
assert not cosmo.has_massive_nu
class ParameterOb0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ob0 on a Cosmology.
    Ob0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Ob0(self, cosmo_cls, cosmo):
"""Test Parameter ``Ob0``."""
# on the class
assert isinstance(cosmo_cls.Ob0, Parameter)
assert "Omega baryon;" in cosmo_cls.Ob0.__doc__
# validation
assert cosmo_cls.Ob0.validate(cosmo, None) is None
assert cosmo_cls.Ob0.validate(cosmo, 0.1) == 0.1
assert cosmo_cls.Ob0.validate(cosmo, 0.1 * u.one) == 0.1
with pytest.raises(ValueError, match="Ob0 cannot be negative"):
cosmo_cls.Ob0.validate(cosmo, -1)
with pytest.raises(ValueError, match="baryonic density can not be larger"):
cosmo_cls.Ob0.validate(cosmo, cosmo.Om0 + 1)
# on the instance
assert cosmo.Ob0 is cosmo._Ob0
assert cosmo.Ob0 == 0.03
def test_init_Ob0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ob0``."""
# test that it works with units
assert isinstance(ba.arguments["Ob0"], u.Quantity)
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == ba.arguments["Ob0"]
# also without units
ba.arguments["Ob0"] = ba.arguments["Ob0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == ba.arguments["Ob0"]
# Setting param to 0 respects that. Note this test uses ``Ob()``.
ba.arguments["Ob0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ob(1), 0)
assert u.allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# Negative Ob0 errors
tba = copy.copy(ba)
tba.arguments["Ob0"] = -0.04
with pytest.raises(ValueError, match="Ob0 cannot be negative"):
cosmo_cls(*tba.args, **tba.kwargs)
# Ob0 > Om0 errors
tba.arguments["Ob0"] = tba.arguments["Om0"] + 0.1
with pytest.raises(ValueError, match="baryonic density can not be larger"):
cosmo_cls(*tba.args, **tba.kwargs)
# No baryons specified means baryon-specific methods fail.
tba = copy.copy(ba)
tba.arguments.pop("Ob0", None)
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
with pytest.raises(ValueError):
cosmo.Ob(1)
# also means DM fraction is undefined
with pytest.raises(ValueError):
cosmo.Odm(1)
# The default value is None
assert cosmo_cls._init_signature.parameters["Ob0"].default is None
class TestFLRW(
CosmologyTest,
ParameterH0TestMixin,
ParameterOm0TestMixin,
ParameterOde0TestMixin,
ParameterTcmb0TestMixin,
ParameterNeffTestMixin,
Parameterm_nuTestMixin,
ParameterOb0TestMixin,
):
"""Test :class:`astropy.cosmology.FLRW`."""
abstract_w = True
def setup_class(self):
"""
Setup for testing.
FLRW is abstract, so tests are done on a subclass.
"""
        # make sure SubFLRW is known
_COSMOLOGY_CLASSES["SubFLRW"] = SubFLRW
self.cls = SubFLRW
self._cls_args = dict(
H0=70 * u.km / u.s / u.Mpc, Om0=0.27 * u.one, Ode0=0.73 * u.one
)
self.cls_kwargs = dict(
Tcmb0=3.0 * u.K,
Ob0=0.03 * u.one,
name=self.__class__.__name__,
meta={"a": "b"},
)
def teardown_class(self):
super().teardown_class(self)
_COSMOLOGY_CLASSES.pop("SubFLRW", None)
@pytest.fixture(scope="class")
def nonflatcosmo(self):
"""A non-flat cosmology used in equivalence tests."""
return LambdaCDM(70, 0.4, 0.8)
# ===============================================================
# Method & Attribute Tests
def test_init(self, cosmo_cls):
"""Test initialization."""
super().test_init(cosmo_cls)
# TODO! tests for initializing calculated values, e.g. `h`
# TODO! transfer tests for initializing neutrinos
def test_init_Tcmb0_zeroing(self, cosmo_cls, ba):
"""Test if setting Tcmb0 parameter to 0 influences other parameters.
TODO: consider moving this test to ``FLRWSubclassTest``
"""
ba.arguments["Tcmb0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ogamma0 == 0.0
assert cosmo.Onu0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
# ---------------------------------------------------------------
# Properties
def test_Odm0(self, cosmo_cls, cosmo):
"""Test property ``Odm0``."""
# on the class
assert isinstance(cosmo_cls.Odm0, property)
assert cosmo_cls.Odm0.fset is None # immutable
# on the instance
assert cosmo.Odm0 is cosmo._Odm0
# Odm0 can be None, if Ob0 is None. Otherwise DM = matter - baryons.
if cosmo.Ob0 is None:
assert cosmo.Odm0 is None
else:
assert np.allclose(cosmo.Odm0, cosmo.Om0 - cosmo.Ob0)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
# on the class
assert isinstance(cosmo_cls.Ok0, property)
assert cosmo_cls.Ok0.fset is None # immutable
# on the instance
assert cosmo.Ok0 is cosmo._Ok0
assert np.allclose(
cosmo.Ok0, 1.0 - (cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0)
)
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# on the class
assert isinstance(cosmo_cls.is_flat, property)
assert cosmo_cls.is_flat.fset is None # immutable
# on the instance
assert isinstance(cosmo.is_flat, bool)
assert cosmo.is_flat is bool((cosmo.Ok0 == 0.0) and (cosmo.Otot0 == 1.0))
def test_Tnu0(self, cosmo_cls, cosmo):
"""Test property ``Tnu0``."""
# on the class
assert isinstance(cosmo_cls.Tnu0, property)
assert cosmo_cls.Tnu0.fset is None # immutable
# on the instance
assert cosmo.Tnu0 is cosmo._Tnu0
assert cosmo.Tnu0.unit == u.K
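        # 0.7137658555036082 is (4 / 11)**(1 / 3), the standard post-decoupling
        # neutrino-to-photon temperature ratio.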
assert u.allclose(cosmo.Tnu0, 0.7137658555036082 * cosmo.Tcmb0, rtol=1e-5)
def test_has_massive_nu(self, cosmo_cls, cosmo):
"""Test property ``has_massive_nu``."""
# on the class
assert isinstance(cosmo_cls.has_massive_nu, property)
assert cosmo_cls.has_massive_nu.fset is None # immutable
# on the instance
if cosmo.Tnu0 == 0:
assert cosmo.has_massive_nu is False
else:
assert cosmo.has_massive_nu is cosmo._massivenu
def test_h(self, cosmo_cls, cosmo):
"""Test property ``h``."""
# on the class
assert isinstance(cosmo_cls.h, property)
assert cosmo_cls.h.fset is None # immutable
# on the instance
assert cosmo.h is cosmo._h
assert np.allclose(cosmo.h, cosmo.H0.value / 100.0)
def test_hubble_time(self, cosmo_cls, cosmo):
"""Test property ``hubble_time``."""
# on the class
assert isinstance(cosmo_cls.hubble_time, property)
assert cosmo_cls.hubble_time.fset is None # immutable
# on the instance
assert cosmo.hubble_time is cosmo._hubble_time
assert u.allclose(cosmo.hubble_time, (1 / cosmo.H0) << u.Gyr)
def test_hubble_distance(self, cosmo_cls, cosmo):
"""Test property ``hubble_distance``."""
# on the class
assert isinstance(cosmo_cls.hubble_distance, property)
assert cosmo_cls.hubble_distance.fset is None # immutable
# on the instance
assert cosmo.hubble_distance is cosmo._hubble_distance
assert cosmo.hubble_distance == (const.c / cosmo._H0).to(u.Mpc)
def test_critical_density0(self, cosmo_cls, cosmo):
"""Test property ``critical_density0``."""
# on the class
assert isinstance(cosmo_cls.critical_density0, property)
assert cosmo_cls.critical_density0.fset is None # immutable
# on the instance
assert cosmo.critical_density0 is cosmo._critical_density0
assert cosmo.critical_density0.unit == u.g / u.cm**3
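        # rho_crit0 = 3 H0^2 / (8 pi G): ``_critdens_const`` carries the
        # 3 / (8 pi G) factor (cgs) and ``_H0units_to_invs`` converts H0 from
        # km/s/Mpc to 1/s.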
cd0value = _critdens_const * (cosmo.H0.value * _H0units_to_invs) ** 2
assert cosmo.critical_density0.value == cd0value
def test_Ogamma0(self, cosmo_cls, cosmo):
"""Test property ``Ogamma0``."""
# on the class
assert isinstance(cosmo_cls.Ogamma0, property)
assert cosmo_cls.Ogamma0.fset is None # immutable
# on the instance
assert cosmo.Ogamma0 is cosmo._Ogamma0
        # Ogamma0 \propto Tcmb0^4 / rho_crit0
expect = _a_B_c2 * cosmo.Tcmb0.value**4 / cosmo.critical_density0.value
assert np.allclose(cosmo.Ogamma0, expect)
# check absolute equality to 0 if Tcmb0 is 0
if cosmo.Tcmb0 == 0:
assert cosmo.Ogamma0 == 0
def test_Onu0(self, cosmo_cls, cosmo):
"""Test property ``Onu0``."""
# on the class
assert isinstance(cosmo_cls.Onu0, property)
assert cosmo_cls.Onu0.fset is None # immutable
# on the instance
assert cosmo.Onu0 is cosmo._Onu0
# neutrino temperature <= photon temperature since the neutrinos
# decouple first.
if cosmo.has_massive_nu: # Tcmb0 > 0 & has massive
# check the expected formula
assert cosmo.Onu0 == cosmo.Ogamma0 * cosmo.nu_relative_density(0)
            # a sanity check on the ratio of neutrinos to photons
# technically it could be 1, but not for any of the tested cases.
assert cosmo.nu_relative_density(0) <= 1
elif cosmo.Tcmb0 == 0:
assert cosmo.Onu0 == 0
else:
            # check the expected formula:
            # Onu0 = (7/8) * (4/11)**(4/3) * Neff * Ogamma0
assert cosmo.Onu0 == 0.22710731766 * cosmo._Neff * cosmo.Ogamma0
# and check compatibility with nu_relative_density
assert np.allclose(
cosmo.nu_relative_density(0), 0.22710731766 * cosmo._Neff
)
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`."""
assert (
cosmo.Otot0
== cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ode0 + cosmo.Ok0
)
# ---------------------------------------------------------------
# Methods
def test_w(self, cosmo):
"""Test abstract :meth:`astropy.cosmology.FLRW.w`."""
with pytest.raises(NotImplementedError, match="not implemented"):
cosmo.w(1)
def test_Otot(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
with pytest.raises(exception):
assert cosmo.Otot(1)
def test_efunc_vs_invefunc(self, cosmo):
"""
Test that efunc and inv_efunc give inverse values.
Here they just fail b/c no ``w(z)`` or no scipy.
"""
exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
with pytest.raises(exception):
cosmo.efunc(0.5)
with pytest.raises(exception):
cosmo.inv_efunc(0.5)
# ---------------------------------------------------------------
# from Cosmology
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# don't change any values
kwargs = cosmo._init_arguments.copy()
kwargs.pop("name", None) # make sure not setting name
kwargs.pop("meta", None) # make sure not setting name
c = cosmo.clone(**kwargs)
assert c.__class__ == cosmo.__class__
assert c == cosmo
# change ``H0``
# Note that H0 affects Ode0 because it changes Ogamma0
c = cosmo.clone(H0=100)
assert c.__class__ == cosmo.__class__
assert c.name == cosmo.name + " (modified)"
assert c.H0.value == 100
for n in set(cosmo.__parameters__) - {"H0"}:
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(
v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)
)
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
# change multiple things
c = cosmo.clone(name="new name", H0=100, Tcmb0=2.8, meta=dict(zz="tops"))
assert c.__class__ == cosmo.__class__
assert c.name == "new name"
assert c.H0.value == 100
assert c.Tcmb0.value == 2.8
assert c.meta == {**cosmo.meta, **dict(zz="tops")}
for n in set(cosmo.__parameters__) - {"H0", "Tcmb0"}:
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(
v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)
)
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
assert not u.allclose(c.Tcmb0.value, cosmo.Tcmb0.value)
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to CosmologySubclassTest
# test against a FlatFLRWMixin
# case (3) in FLRW.is_equivalent
if isinstance(cosmo, FlatLambdaCDM):
assert cosmo.is_equivalent(Planck18)
assert Planck18.is_equivalent(cosmo)
else:
assert not cosmo.is_equivalent(Planck18)
assert not Planck18.is_equivalent(cosmo)
class FLRWSubclassTest(TestFLRW):
"""
Test subclasses of :class:`astropy.cosmology.FLRW`.
This is broken away from ``TestFLRW``, because ``FLRW`` is an ABC and
subclasses must override some methods.
"""
abstract_w = False
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
# ===============================================================
# Method & Attribute Tests
_FLRW_redshift_methods = get_redshift_methods(
FLRW, include_private=True, include_z2=False
)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize("method", _FLRW_redshift_methods)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
with pytest.raises(exc):
getattr(cosmo, method)(z)
@pytest.mark.parametrize("z", valid_zs)
@abc.abstractmethod
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.w`.
Since ``w`` is abstract, each test class needs to define further tests.
"""
# super().test_w(cosmo, z) # NOT b/c abstract `w(z)`
w = cosmo.w(z)
assert np.shape(w) == np.shape(z) # test same shape
assert u.Quantity(w).unit == u.one # test no units or dimensionless
# -------------------------------------------
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
# super().test_Otot(cosmo) # NOT b/c abstract `w(z)`
assert np.allclose(
cosmo.Otot(z),
cosmo.Om(z) + cosmo.Ogamma(z) + cosmo.Onu(z) + cosmo.Ode(z) + cosmo.Ok(z),
)
# ---------------------------------------------------------------
def test_efunc_vs_invefunc(self, cosmo):
"""Test that ``efunc`` and ``inv_efunc`` give inverse values.
Note that the test doesn't need scipy because it doesn't need to call
``de_density_scale``.
"""
# super().test_efunc_vs_invefunc(cosmo) # NOT b/c abstract `w(z)`
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# -----------------------------------------------------------------------------
class ParameterFlatOde0TestMixin(ParameterOde0TestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a flat Cosmology.
This will augment or override some tests in ``ParameterOde0TestMixin``.
    Ode0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls):
"""Test Parameter ``Ode0`` on the class."""
super().test_Parameter_Ode0(cosmo_cls)
assert cosmo_cls.Ode0.derived in (True, np.True_)
def test_Ode0(self, cosmo):
"""Test no-longer-Parameter ``Ode0``."""
assert cosmo.Ode0 is cosmo._Ode0
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0)
def test_init_Ode0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ode0``."""
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ok0)
# Ode0 is not in the signature
with pytest.raises(TypeError, match="Ode0"):
cosmo_cls(*ba.args, **ba.kwargs, Ode0=1)
class FlatFLRWMixinTest(FlatCosmologyMixinTest, ParameterFlatOde0TestMixin):
"""Tests for :class:`astropy.cosmology.FlatFLRWMixin` subclasses.
    E.g., to use this class::
class TestFlatSomeFLRW(FlatFLRWMixinTest, TestSomeFLRW):
...
"""
def setup_class(self):
"""Setup for testing.
        Set up as for a regular FLRW test class, but remove the dark energy
        component, since flat cosmologies forbid Ode0 as an argument;
        see ``test_init_subclass``.
"""
super().setup_class(self)
self._cls_args.pop("Ode0")
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test initializing subclass, mostly that can't have Ode0 in init."""
super().test_init_subclass(cosmo_cls)
with pytest.raises(TypeError, match="subclasses of"):
class HASOde0SubClass(cosmo_cls):
def __init__(self, Ode0):
pass
_COSMOLOGY_CLASSES.pop(HASOde0SubClass.__qualname__, None)
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
super().test_init(cosmo_cls)
cosmo = cosmo_cls(*self.cls_args, **self.cls_kwargs)
assert cosmo._Ok0 == 0.0
assert cosmo._Ode0 == 1.0 - (
cosmo._Om0 + cosmo._Ogamma0 + cosmo._Onu0 + cosmo._Ok0
)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
super().test_Ok0(cosmo_cls, cosmo)
# for flat cosmologies, Ok0 is not *close* to 0, it *is* 0
assert cosmo.Ok0 == 0.0
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`. Should always be 1."""
super().test_Otot0(cosmo)
# for flat cosmologies, Otot0 is not *close* to 1, it *is* 1
assert cosmo.Otot0 == 1.0
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`. Should always be 1."""
super().test_Otot(cosmo, z)
# for flat cosmologies, Otot is 1, within precision.
assert u.allclose(cosmo.Otot(z), 1.0)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize(
"method", FLRWSubclassTest._FLRW_redshift_methods - {"Otot"}
)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
# ---------------------------------------------------------------
def test_clone_to_nonflat_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_to_nonflat_change_param(cosmo)
# change Ode0, without non-flat
with pytest.raises(TypeError):
cosmo.clone(Ode0=1)
# change to non-flat
nc = cosmo.clone(to_nonflat=True, Ode0=cosmo.Ode0)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
nc = cosmo.clone(to_nonflat=True, Ode0=1)
assert nc.Ode0 == 1.0
assert nc.name == cosmo.name + " (modified)"
# ---------------------------------------------------------------
def test_is_equivalent(self, cosmo, nonflatcosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to TestFLRW
# against non-flat Cosmology
assert not cosmo.is_equivalent(nonflatcosmo)
assert not nonflatcosmo.is_equivalent(cosmo)
# non-flat version of class
nonflat_cosmo_cls = cosmo.__nonflatclass__
# keys check in `test_is_equivalent_nonflat_class_different_params`
# non-flat
nonflat = nonflat_cosmo_cls(*self.cls_args, Ode0=0.9, **self.cls_kwargs)
assert not nonflat.is_equivalent(cosmo)
assert not cosmo.is_equivalent(nonflat)
# flat, but not FlatFLRWMixin
flat = nonflat_cosmo_cls(
*self.cls_args,
Ode0=1.0 - cosmo.Om0 - cosmo.Ogamma0 - cosmo.Onu0,
**self.cls_kwargs
)
flat._Ok0 = 0.0
assert flat.is_equivalent(cosmo)
assert cosmo.is_equivalent(flat)
def test_repr(self, cosmo_cls, cosmo):
"""
Test method ``.__repr__()``. Skip non-flat superclass test.
        e.g. `TestFlatLambdaCDM` -> `FlatFLRWMixinTest`
        vs `TestFlatLambdaCDM` -> `TestLambdaCDM` -> `FlatFLRWMixinTest`
"""
FLRWSubclassTest.test_repr(self, cosmo_cls, cosmo)
# test eliminated Ode0 from parameters
assert "Ode0" not in repr(cosmo)
|
6d651da67c2034e79c364cca9716e24845492ea89b21b2ca9cd4481335de4901 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.w0wzcdm`."""
##############################################################################
# IMPORTS
# STDLIB
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import w0wzCDM
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.tests.test_core import ParameterTestMixin
from .test_base import FLRWSubclassTest
from .test_w0cdm import Parameterw0TestMixin
##############################################################################
# TESTS
##############################################################################
class ParameterwzTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` wz on a Cosmology.
    wz is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_wz(self, cosmo_cls, cosmo):
"""Test Parameter ``wz``."""
# on the class
assert isinstance(cosmo_cls.wz, Parameter)
assert "Derivative of the dark energy" in cosmo_cls.wz.__doc__
assert cosmo_cls.wz.unit is None
# on the instance
assert cosmo.wz is cosmo._wz
assert cosmo.wz == self.cls_kwargs["wz"]
def test_init_wz(self, cosmo_cls, ba):
"""Test initialization for values of ``wz``."""
# test that it works with units
ba.arguments["wz"] = ba.arguments["wz"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wz == ba.arguments["wz"]
# also without units
ba.arguments["wz"] = ba.arguments["wz"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wz == ba.arguments["wz"]
# must be dimensionless
ba.arguments["wz"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
class Testw0wzCDM(FLRWSubclassTest, Parameterw0TestMixin, ParameterwzTestMixin):
"""Test :class:`astropy.cosmology.w0wzCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = w0wzCDM
self.cls_kwargs.update(w0=-1, wz=0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(w0=0.1, wz=0.2)
assert c.w0 == 0.1
assert c.wz == 0.2
for n in set(cosmo.__parameters__) - {"w0", "wz"}:
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(
v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)
)
# @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below
def test_w(self, cosmo):
"""Test :meth:`astropy.cosmology.w0wzCDM.w`."""
# super().test_w(cosmo, z)
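        # For w0wzCDM the equation of state is w(z) = w0 + wz * z, so with
        # w0=-1 and wz=0.5 (set in setup_class) w(1.0) = -0.5.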
assert u.allclose(cosmo.w(1.0), -0.5)
assert u.allclose(
cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]), [-1.0, -0.75, -0.5, -0.25, 0.15]
)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'w0wzCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,'
" Ode0=0.73, w0=-1.0, wz=0.5, Tcmb0=3.0 K, Neff=3.04,"
" m_nu=[0. 0. 0.] eV, Ob0=0.03)"
)
assert repr(cosmo) == expected
|
04bd4685b9cb96da5e9101431b6bb62cf402d37d0b7034405d8dcd667c9583b7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.w0wacdm`."""
##############################################################################
# IMPORTS
# STDLIB
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import Flatw0waCDM, w0waCDM
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.tests.test_core import ParameterTestMixin
from .test_base import FlatFLRWMixinTest, FLRWSubclassTest
from .test_w0cdm import Parameterw0TestMixin
##############################################################################
# TESTS
##############################################################################
class ParameterwaTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` wa on a Cosmology.
    wa is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_wa(self, cosmo_cls, cosmo):
"""Test Parameter ``wa``."""
# on the class
assert isinstance(cosmo_cls.wa, Parameter)
assert "Negative derivative" in cosmo_cls.wa.__doc__
assert cosmo_cls.wa.unit is None
# on the instance
assert cosmo.wa is cosmo._wa
assert cosmo.wa == self.cls_kwargs["wa"]
def test_init_wa(self, cosmo_cls, ba):
"""Test initialization for values of ``wa``."""
# test that it works with units
ba.arguments["wa"] = ba.arguments["wa"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wa == ba.arguments["wa"]
# also without units
ba.arguments["wa"] = ba.arguments["wa"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wa == ba.arguments["wa"]
# must be dimensionless
ba.arguments["wa"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
class Testw0waCDM(FLRWSubclassTest, Parameterw0TestMixin, ParameterwaTestMixin):
"""Test :class:`astropy.cosmology.w0waCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = w0waCDM
self.cls_kwargs.update(w0=-1, wa=-0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(w0=0.1, wa=0.2)
assert c.w0 == 0.1
assert c.wa == 0.2
for n in set(cosmo.__parameters__) - {"w0", "wa"}:
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(
v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)
)
# @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below
def test_w(self, cosmo):
"""Test :meth:`astropy.cosmology.w0waCDM.w`."""
# super().test_w(cosmo, z)
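        # For w0waCDM (CPL parametrization) w(z) = w0 + wa * z / (1 + z), so
        # with w0=-1 and wa=-0.5 (set in setup_class) w(1.0) = -1.25.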
assert u.allclose(cosmo.w(1.0), -1.25)
assert u.allclose(
cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1, -1.16666667, -1.25, -1.3, -1.34848485],
)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'w0waCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,'
" Ode0=0.73, w0=-1.0, wa=-0.5, Tcmb0=3.0 K, Neff=3.04,"
" m_nu=[0. 0. 0.] eV, Ob0=0.03)"
)
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class TestFlatw0waCDM(FlatFLRWMixinTest, Testw0waCDM):
"""Test :class:`astropy.cosmology.Flatw0waCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = Flatw0waCDM
self.cls_kwargs.update(w0=-1, wa=-0.5)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'Flatw0waCDM(name="ABCMeta", H0=70.0 km / (Mpc s),'
" Om0=0.27, w0=-1.0, wa=-0.5, Tcmb0=3.0 K, Neff=3.04,"
" m_nu=[0. 0. 0.] eV, Ob0=0.03)"
)
assert repr(cosmo) == expected
|
60b5b4120a5a222cc28f401e81fc7dda96ff988e3b1920f63ae7712548b4cb10 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.w0cdm`."""
##############################################################################
# IMPORTS
# STDLIB
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import FlatwCDM, wCDM
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.tests.test_core import ParameterTestMixin, valid_zs
from .test_base import FlatFLRWMixinTest, FLRWSubclassTest
##############################################################################
# TESTS
##############################################################################
class Parameterw0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` w0 on a Cosmology.
    w0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_w0(self, cosmo_cls, cosmo):
"""Test Parameter ``w0``."""
# on the class
assert isinstance(cosmo_cls.w0, Parameter)
assert "Dark energy equation of state" in cosmo_cls.w0.__doc__
assert cosmo_cls.w0.unit is None
# on the instance
assert cosmo.w0 is cosmo._w0
assert cosmo.w0 == self.cls_kwargs["w0"]
def test_init_w0(self, cosmo_cls, ba):
"""Test initialization for values of ``w0``."""
# test that it works with units
ba.arguments["w0"] = ba.arguments["w0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.w0 == ba.arguments["w0"]
# also without units
ba.arguments["w0"] = ba.arguments["w0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.w0 == ba.arguments["w0"]
# must be dimensionless
ba.arguments["w0"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
class TestwCDM(FLRWSubclassTest, Parameterw0TestMixin):
"""Test :class:`astropy.cosmology.wCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = wCDM
self.cls_kwargs.update(w0=-0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(w0=0.1)
assert c.w0 == 0.1
for n in set(cosmo.__parameters__) - {"w0"}:
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(
v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)
)
@pytest.mark.parametrize("z", valid_zs)
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.wCDM.w`."""
super().test_w(cosmo, z)
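        # wCDM has a constant dark-energy equation of state: w(z) == w0 for all z.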
w = cosmo.w(z)
assert u.allclose(w, self.cls_kwargs["w0"])
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'wCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,'
" Ode0=0.73, w0=-0.5, Tcmb0=3.0 K, Neff=3.04,"
" m_nu=[0. 0. 0.] eV, Ob0=0.03)"
)
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class TestFlatwCDM(FlatFLRWMixinTest, TestwCDM):
"""Test :class:`astropy.cosmology.FlatwCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = FlatwCDM
self.cls_kwargs.update(w0=-0.5)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'FlatwCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,'
" w0=-0.5, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV,"
" Ob0=0.03)"
)
assert repr(cosmo) == expected
|
1c1393c7a86432e1baa46897a71533657e0b72558fc29df4fbe7725d8c7b1d6a | # Load the WCS information from a fits header, and use it
# to convert pixel coordinates to world coordinates.
import sys
import numpy as np
from astropy import wcs
from astropy.io import fits
def load_wcs_from_file(filename):
# Load the FITS hdulist using astropy.io.fits
hdulist = fits.open(filename)
# Parse the WCS keywords in the primary HDU
w = wcs.WCS(hdulist[0].header)
# Print out the "name" of the WCS, as defined in the FITS header
print(w.wcs.name)
# Print out all of the settings that were parsed from the header
w.wcs.print_contents()
# Three pixel coordinates of interest.
# Note we've silently assumed an NAXIS=2 image here.
# The pixel coordinates are pairs of [X, Y].
# The "origin" argument indicates whether the input coordinates
# are 0-based (as in Numpy arrays) or
# 1-based (as in the FITS convention, for example coordinates
# coming from DS9).
pixcrd = np.array([[0, 0], [24, 38], [45, 98]], dtype=np.float64)
# Convert pixel coordinates to world coordinates
# The second argument is "origin" -- in this case we're declaring we
# have 0-based (Numpy-like) coordinates.
world = w.wcs_pix2world(pixcrd, 0)
print(world)
# Convert the same coordinates back to pixel coordinates.
pixcrd2 = w.wcs_world2pix(world, 0)
print(pixcrd2)
# These should be the same as the original pixel coordinates, modulo
# some floating-point error.
assert np.max(np.abs(pixcrd - pixcrd2)) < 1e-6
# The example below illustrates the use of "origin" to convert between
# 0- and 1- based coordinates when executing the forward and backward
# WCS transform.
x = 0
y = 0
origin = 0
assert (w.wcs_pix2world(x, y, origin) ==
w.wcs_pix2world(x + 1, y + 1, origin + 1))
if __name__ == '__main__':
load_wcs_from_file(sys.argv[-1])
|
56ddfcf82c7380e47d96c92ebf0959f8cc7684c406db53c2e8dbed03315efcb3 | # Set the WCS information manually by setting properties of the WCS
# object.
import numpy as np
from astropy import wcs
from astropy.io import fits
# Create a new WCS object. The number of axes must be set
# from the start
w = wcs.WCS(naxis=2)
# Set up an "Airy's zenithal" projection
# Vector properties may be set with Python lists, or Numpy arrays
w.wcs.crpix = [-234.75, 8.3393]
w.wcs.cdelt = np.array([-0.066667, 0.066667])
w.wcs.crval = [0, -90]
w.wcs.ctype = ["RA---AIR", "DEC--AIR"]
w.wcs.set_pv([(2, 1, 45.0)])
# Three pixel coordinates of interest.
# The pixel coordinates are pairs of [X, Y].
# The "origin" argument indicates whether the input coordinates
# are 0-based (as in Numpy arrays) or
# 1-based (as in the FITS convention, for example coordinates
# coming from DS9).
pixcrd = np.array([[0, 0], [24, 38], [45, 98]], dtype=np.float64)
# Convert pixel coordinates to world coordinates.
# The second argument is "origin" -- in this case we're declaring we
# have 0-based (Numpy-like) coordinates.
world = w.wcs_pix2world(pixcrd, 0)
print(world)
# Convert the same coordinates back to pixel coordinates.
pixcrd2 = w.wcs_world2pix(world, 0)
print(pixcrd2)
# These should be the same as the original pixel coordinates, modulo
# some floating-point error.
assert np.max(np.abs(pixcrd - pixcrd2)) < 1e-6
# The example below illustrates the use of "origin" to convert between
# 0- and 1- based coordinates when executing the forward and backward
# WCS transform.
x = 0
y = 0
origin = 0
assert (w.wcs_pix2world(x, y, origin) ==
w.wcs_pix2world(x + 1, y + 1, origin + 1))
# Now, write out the WCS object as a FITS header
header = w.to_header()
# header is an astropy.io.fits.Header object. We can use it to create a new
# PrimaryHDU and write it to a file.
hdu = fits.PrimaryHDU(header=header)
# Save to FITS file
# hdu.writeto('test.fits')
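# A small round-trip sketch (an addition to the original example, under the
# assumption it helps illustrate to_header): a WCS rebuilt from the generated
# header should carry the same projection settings.
w2 = wcs.WCS(header)
print(list(w2.wcs.ctype))  # expected: ['RA---AIR', 'DEC--AIR']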
|
9720a3e158d751a575c771fb7f5288e768e75c6a937139a6891d5948cad4564f | # Define the astropy.wcs.WCS object using a Python dictionary as input
import astropy.wcs
wcs_dict = {
'CTYPE1': 'WAVE ', 'CUNIT1': 'Angstrom', 'CDELT1': 0.2, 'CRPIX1': 0, 'CRVAL1': 10, 'NAXIS1': 5,
'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'deg', 'CDELT2': 0.5, 'CRPIX2': 2, 'CRVAL2': 0.5, 'NAXIS2': 4,
'CTYPE3': 'HPLN-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.4, 'CRPIX3': 2, 'CRVAL3': 1, 'NAXIS3': 3}
input_wcs = astropy.wcs.WCS(wcs_dict)
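# A minimal usage sketch (an addition, not part of the original snippet): convert
# one pixel coordinate to world values with the WCS defined above. The second
# argument (0) selects 0-based, NumPy-style pixel coordinates; the pixel values
# themselves are only illustrative.
print(input_wcs.wcs_pix2world([[0.0, 1.0, 1.0]], 0))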
|
2989b4abae9030d1e08f908e8f879dc54b48e7e3f523fb57969adf57b29240ab | # NOTE: this hook should be added to
# https://github.com/pyinstaller/pyinstaller-hooks-contrib
# once that repository is ready for pull requests
from PyInstaller.utils.hooks import collect_data_files
datas = collect_data_files("skyfield")
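# For reference: `datas` is the standard hook variable PyInstaller reads -- a list
# of (source path, destination directory) tuples bundled into the frozen app.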
|
a323ad380ce9a299ad15e31b99c9f4575e2a09a926be16c63e2a09c8b6c7c832 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file is the main file used when running tests with pytest directly,
# in particular if running e.g. ``pytest docs/``.
import os
import tempfile
import hypothesis
from astropy import __version__
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
except ImportError:
PYTEST_HEADER_MODULES = {}
TESTED_VERSIONS = {}
# This has to be in the root dir or it will not display in CI.
def pytest_configure(config):
PYTEST_HEADER_MODULES["PyERFA"] = "erfa"
PYTEST_HEADER_MODULES["Cython"] = "cython"
PYTEST_HEADER_MODULES["Scikit-image"] = "skimage"
PYTEST_HEADER_MODULES["asdf"] = "asdf"
PYTEST_HEADER_MODULES["pyarrow"] = "pyarrow"
TESTED_VERSIONS["Astropy"] = __version__
# This has to be in the root dir or it will not display in CI.
def pytest_report_header(config):
# This gets added after the pytest-astropy-header output.
return (
f'ARCH_ON_CI: {os.environ.get("ARCH_ON_CI", "undefined")}\n'
f'IS_CRON: {os.environ.get("IS_CRON", "undefined")}\n'
)
# Tell Hypothesis that we might be running slow tests, to print the seed blob
# so we can easily reproduce failures from CI, and derive a fuzzing profile
# to try many more inputs when we detect a scheduled build or when specifically
# requested using the HYPOTHESIS_PROFILE=fuzzing environment variable or
# `pytest --hypothesis-profile=fuzzing ...` argument.
hypothesis.settings.register_profile(
"ci", deadline=None, print_blob=True, derandomize=True
)
hypothesis.settings.register_profile(
"fuzzing", deadline=None, print_blob=True, max_examples=1000
)
default = (
"fuzzing"
if (
os.environ.get("IS_CRON") == "true"
and os.environ.get("ARCH_ON_CI") not in ("aarch64", "ppc64le")
)
else "ci"
)
hypothesis.settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", default))
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.
os.environ["XDG_CONFIG_HOME"] = tempfile.mkdtemp("astropy_config")
os.environ["XDG_CACHE_HOME"] = tempfile.mkdtemp("astropy_cache")
os.mkdir(os.path.join(os.environ["XDG_CONFIG_HOME"], "astropy"))
os.mkdir(os.path.join(os.environ["XDG_CACHE_HOME"], "astropy"))
# Note that we don't need to change the environment variables back or remove
# them after testing, because they are only changed for the duration of the
# Python process, and this configuration only matters if running pytest
# directly, not from e.g. an IPython session.
|
25e2dd1f73ff41502ce92cae2b7fbd46e4f9352638b02e3082e807ffac947278 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory.
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# See sphinx_astropy.conf for which values are set there.
import configparser
import doctest
import os
import sys
from datetime import datetime
from importlib import metadata
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
# -- Check for missing dependencies -------------------------------------------
missing_requirements = {}
for line in metadata.requires("astropy"):
if 'extra == "docs"' in line:
req = Requirement(line.split(";")[0])
req_package = req.name.lower()
req_specifier = str(req.specifier)
        try:
            version = metadata.version(req_package)
        except metadata.PackageNotFoundError:
            missing_requirements[req_package] = req_specifier
            continue  # can't check the specifier for a package that isn't installed
        if version not in SpecifierSet(req_specifier, prereleases=True):
            missing_requirements[req_package] = req_specifier
if missing_requirements:
print(
"The following packages could not be found and are required to "
"build the documentation:"
)
for key, val in missing_requirements.items():
print(f" * {key} {val}")
print('Please install the "docs" requirements.')
sys.exit(1)
from sphinx_astropy.conf.v1 import * # noqa: E402, F403
from sphinx_astropy.conf.v1 import (
numpydoc_xref_aliases,
numpydoc_xref_astropy_aliases,
numpydoc_xref_ignore,
rst_epilog,
)
# -- Plot configuration -------------------------------------------------------
plot_rcparams = {}
plot_rcparams["figure.figsize"] = (6, 6)
plot_rcparams["savefig.facecolor"] = "none"
plot_rcparams["savefig.bbox"] = "tight"
plot_rcparams["axes.labelsize"] = "large"
plot_rcparams["figure.subplot.hspace"] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ["png", "svg", "pdf"]
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.7"
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("X.Y.Z")` here.
check_sphinx_version("1.2.1") # noqa: F405
# The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for
# the benefit of other packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping["astropy"] # noqa: F405
# add any custom intersphinx for astropy
# fmt: off
intersphinx_mapping["astropy-dev"] = ("https://docs.astropy.org/en/latest/", None) # noqa: F405
intersphinx_mapping["pyerfa"] = ("https://pyerfa.readthedocs.io/en/stable/", None) # noqa: F405
intersphinx_mapping["pytest"] = ("https://docs.pytest.org/en/stable/", None) # noqa: F405
intersphinx_mapping["ipython"] = ("https://ipython.readthedocs.io/en/stable/", None) # noqa: F405
intersphinx_mapping["pandas"] = ("https://pandas.pydata.org/pandas-docs/stable/", None) # noqa: E501, F405
intersphinx_mapping["sphinx_automodapi"] = ("https://sphinx-automodapi.readthedocs.io/en/stable/", None) # noqa: E501, F405
intersphinx_mapping["packagetemplate"] = ("https://docs.astropy.org/projects/package-template/en/latest/", None) # noqa: E501, F405
intersphinx_mapping["h5py"] = ("https://docs.h5py.org/en/stable/", None) # noqa: F405
intersphinx_mapping["asdf-astropy"] = ("https://asdf-astropy.readthedocs.io/en/latest/", None) # noqa: F405
intersphinx_mapping["fsspec"] = ("https://filesystem-spec.readthedocs.io/en/latest/", None) # noqa: F405
# fmt: on
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append("_templates") # noqa: F405
exclude_patterns.append("changes") # noqa: F405
exclude_patterns.append("_pkgtemplate.rst") # noqa: F405
exclude_patterns.append( # noqa: F405
"**/*.inc.rst"
)  # .inc.rst files are *include* files; don't have Sphinx process them
# Add any paths that contain templates here, relative to this directory.
if "templates_path" not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append("_templates")
extensions += ["sphinx_changelog"] # noqa: F405
# Grab minversion from setup.cfg
setup_cfg = configparser.ConfigParser()
setup_cfg.read(os.path.join(os.path.pardir, "setup.cfg"))
__minimum_python_version__ = setup_cfg["options"]["python_requires"].replace(">=", "")
project = "Astropy"
min_versions = {}
for line in metadata.requires("astropy"):
req = Requirement(line.split(";")[0])
min_versions[req.name.lower()] = str(req.specifier)
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
with open("common_links.txt") as cl:
rst_epilog += cl.read().format(
minimum_python=__minimum_python_version__, **min_versions
)
# Manually register doctest options since matplotlib 3.5 messed up allowing them
# from pytest-doctestplus
IGNORE_OUTPUT = doctest.register_optionflag("IGNORE_OUTPUT")
REMOTE_DATA = doctest.register_optionflag("REMOTE_DATA")
FLOAT_CMP = doctest.register_optionflag("FLOAT_CMP")
# Whether to create cross-references for the parameter types in the
# Parameters, Other Parameters, Returns and Yields sections of the docstring.
numpydoc_xref_param_type = True
# Words not to cross-reference. Most likely, these are common words used in
# parameter type descriptions that may be confused for classes of the same
# name. The base set comes from sphinx-astropy. We add more here.
numpydoc_xref_ignore.update(
{
"mixin",
"Any", # aka something that would be annotated with `typing.Any`
# needed in subclassing numpy # TODO! revisit
"Arguments",
"Path",
# TODO! not need to ignore.
"flag",
"bits",
}
)
# Mappings to fully qualified paths (or correct ReST references) for the
# aliases/shortcuts used when specifying the types of parameters.
# Numpy provides some defaults
# https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94
# and a base set comes from sphinx-astropy.
# so here we mostly need to define Astropy-specific x-refs
numpydoc_xref_aliases.update(
{
# python & adjacent
"Any": "`~typing.Any`",
"file-like": ":term:`python:file-like object`",
"file": ":term:`python:file object`",
"path-like": ":term:`python:path-like object`",
"module": ":term:`python:module`",
"buffer-like": ":term:buffer-like",
"hashable": ":term:`python:hashable`",
# for matplotlib
"color": ":term:`color`",
# for numpy
"ints": ":class:`python:int`",
# for astropy
"number": ":term:`number`",
"Representation": ":class:`~astropy.coordinates.BaseRepresentation`",
"writable": ":term:`writable file-like object`",
"readable": ":term:`readable file-like object`",
"BaseHDU": ":doc:`HDU </io/fits/api/hdus>`",
}
)
# Add from sphinx-astropy 1) glossary aliases 2) physical types.
numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases)
# -- Project information ------------------------------------------------------
author = "The Astropy Developers"
copyright = f"2011–{datetime.utcnow().year}, " + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The full version, including alpha/beta/rc tags.
release = metadata.version(project)
# The short X.Y version.
version = ".".join(release.split(".")[:2])
# Only include dev docs in dev version.
dev = "dev" in release
if not dev:
exclude_patterns.append("development/*") # noqa: F405
exclude_patterns.append("testhelpers.rst") # noqa: F405
# -- Options for the module index ---------------------------------------------
modindex_common_prefix = ["astropy."]
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
# html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
# html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
# html_theme = None
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f"{project} v{release}"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {"to_be_indexed": ["stable", "latest"], "is_development": dev}
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", project + ".tex", project + " Documentation", author, "manual")
]
latex_logo = "_static/astropy_logo.pdf"
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", project.lower(), project + " Documentation", [author], 1)]
# Setting this URL is required by sphinx-astropy
github_issues_url = "https://github.com/astropy/astropy/issues/"
edit_on_github_branch = "main"
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
# The exceptions are not listed here; see the docs/nitpick-exceptions file, loaded below.
nitpick_ignore = []
for line in open("nitpick-exceptions"):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, target))
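# For reference (format inferred from the parsing above, not from the file itself):
# each entry in nitpick-exceptions is "<reference-type> <target>", e.g.
#   py:class some.unresolvable.Target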
# -- Options for the Sphinx gallery -------------------------------------------
try:
import warnings
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
"backreferences_dir": "generated/modules", # path to store the module using example template # noqa: E501
"filename_pattern": "^((?!skip_).)*$", # execute all examples except those that start with "skip_" # noqa: E501
"examples_dirs": f"..{os.sep}examples", # path to the examples scripts
"gallery_dirs": "generated/examples", # path to save gallery generated examples
"reference_url": {
"astropy": None,
"matplotlib": "https://matplotlib.org/stable/",
"numpy": "https://numpy.org/doc/stable/",
},
"abort_on_example_error": True,
}
# Filter out backend-related warnings as described in
# https://github.com/sphinx-gallery/sphinx-gallery/pull/564
warnings.filterwarnings(
"ignore",
category=UserWarning,
message=(
"Matplotlib is currently using agg, which is a"
" non-GUI backend, so cannot show the figure."
),
)
except ImportError:
sphinx_gallery = None
# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = [
"https://journals.aas.org/manuscript-preparation/",
"https://maia.usno.navy.mil/",
"https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer",
"https://aa.usno.navy.mil/publications/docs/Circular_179.php",
"http://data.astropy.org",
"https://doi.org/10.1017/S0251107X00002406", # internal server error
"https://doi.org/10.1017/pasa.2013.31", # internal server error
"https://pyfits.readthedocs.io/en/v3.2.1/", # defunct page in CHANGES.rst
r"https://github\.com/astropy/astropy/(?:issues|pull)/\d+",
]
linkcheck_timeout = 180
linkcheck_anchors = False
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ["robots.txt"]
def rstjinja(app, docname, source):
"""Render pages as a jinja template to hide/show dev docs."""
# Make sure we're outputting HTML
if app.builder.format != "html":
return
files_to_render = ["index", "install"]
if docname in files_to_render:
print(f"Jinja rendering {docname}")
rendered = app.builder.templates.render_string(
source[0], app.config.html_context
)
source[0] = rendered
def resolve_astropy_and_dev_reference(app, env, node, contnode):
"""
Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases.
Documentation links in astropy can be set up as intersphinx links so that
    affiliated packages do not have to override the docstrings when building
the docs.
If we are building the development docs it is a local ref targeting the
label ``astropy-dev:<label>``, but for stable docs it should be an
intersphinx resolution to the development docs.
See https://github.com/astropy/astropy/issues/11366
"""
# should the node be processed?
reftarget = node.get("reftarget") # str or None
if str(reftarget).startswith("astropy:"):
# This allows Astropy to use intersphinx links to itself and have
# them resolve to local links. Downstream packages will see intersphinx.
# TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented.
process, replace = True, "astropy:"
elif dev and str(reftarget).startswith("astropy-dev:"):
process, replace = True, "astropy-dev:"
else:
process, replace = False, ""
# make link local
if process:
reftype = node.get("reftype")
refdoc = node.get("refdoc", app.env.docname)
# convert astropy intersphinx targets to local links.
        # there are a few types of intersphinx link patterns, as described in
# https://docs.readthedocs.io/en/stable/guides/intersphinx.html
reftarget = reftarget.replace(replace, "")
if reftype == "doc": # also need to replace the doc link
node.replace_attr("reftarget", reftarget)
# Delegate to the ref node's original domain/target (typically :ref:)
try:
domain = app.env.domains[node["refdomain"]]
return domain.resolve_xref(
app.env, refdoc, app.builder, reftype, reftarget, node, contnode
)
except Exception:
pass
# Otherwise return None which should delegate to intersphinx
def setup(app):
if sphinx_gallery is None:
msg = (
"The sphinx_gallery extension is not installed, so the "
"gallery will not be built. You will probably see "
"additional warnings about undefined references due "
"to this."
)
try:
app.warn(msg)
except AttributeError:
# Sphinx 1.6+
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.warning(msg)
# Generate the page from Jinja template
app.connect("source-read", rstjinja)
# Set this to higher priority than intersphinx; this way when building
# dev docs astropy-dev: targets will go to the local docs instead of the
# intersphinx mapping
app.connect("missing-reference", resolve_astropy_and_dev_reference, priority=400)
|
0607dc3a62a97899e8b17121e20f37d1e64a3beef7407c200ed9f48c855c0327 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import numpy as np
import astropy.units as u
from . import _stats
__all__ = [
"gaussian_fwhm_to_sigma",
"gaussian_sigma_to_fwhm",
"binom_conf_interval",
"binned_binom_proportion",
"poisson_conf_interval",
"median_absolute_deviation",
"mad_std",
"signal_to_noise_oir_ccd",
"bootstrap",
"kuiper",
"kuiper_two",
"kuiper_false_positive_probability",
"cdf_from_intervals",
"interval_overlap_length",
"histogram_intervals",
"fold_intervals",
]
__doctest_skip__ = ["binned_binom_proportion"]
__doctest_requires__ = {
"binom_conf_interval": ["scipy"],
"poisson_conf_interval": ["scipy"],
}
gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1.0 / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
"""
def binom_conf_interval(k, n, confidence_level=0.68269, interval="wilson"):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
confidence_level : float, optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
Confidence level must be in range [0, 1].
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (n) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/n)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
        CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2}
        \pm \frac{\kappa n^{1/2}}{n + \kappa^2}
        \left(\hat{\epsilon}(1 - \hat{\epsilon}) + \frac{\kappa^2}{4n}\right)^{1/2}
where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, n - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = n the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, n - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}}
    The Wald interval gives acceptable results in some limiting cases,
    particularly when n is very large and the true proportion
    :math:`\epsilon` is not "too close" to 0 or 1. However, as the
    latter is not verifiable when trying to estimate :math:`\epsilon`,
    this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
This function requires ``scipy`` for all interval types.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP
array([0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, n:
>>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP
array([[0.07921741, 0.21597328],
[0.42078276, 0.61736012]])
>>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP
array([[0.0842525 , 0.21789949],
[0.42218001, 0.61753691]])
>>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP
array([[0.12139799, 0.24309021],
[0.45401727, 0.61535699]])
In contrast, the Wald interval gives poor results for small k, n.
For k = 0 or k = n, the interval always has zero length.
>>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP
array([[0.02111437, 0.18091075],
[0.37888563, 0.61908925]])
For confidence intervals approaching 1, the Wald interval for
0 < k < n can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP
array([[-0.26077835, -0.16433593],
[ 0.66077835, 0.96433593]])
""" # noqa: E501
if confidence_level < 0.0 or confidence_level > 1.0:
raise ValueError("confidence_level must be between 0. and 1.")
alpha = 1.0 - confidence_level
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError("n must be positive")
if (k < 0).any() or (k > n).any():
raise ValueError("k must be in {0, 1, .., n}")
if interval == "wilson" or interval == "wald":
from scipy.special import erfinv
kappa = np.sqrt(2.0) * min(erfinv(confidence_level), 1.0e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == "wilson":
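            # Wilson (1927) score interval: center (k + kappa^2/2) / (n + kappa^2),
            # half-width kappa*sqrt(n)/(n + kappa^2) * sqrt(p*(1 - p) + kappa^2/(4n)),
            # matching the CI_Wilson formula in the docstring above.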
midpoint = (k + kappa**2 / 2.0) / (n + kappa**2)
halflength = (
(kappa * np.sqrt(n))
/ (n + kappa**2)
* np.sqrt(p * (1 - p) + kappa**2 / (4 * n))
)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.0] = 0.0
conf_interval[conf_interval > 1.0] = 1.0
else:
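            # Wald interval: p +/- kappa * sqrt(p * (1 - p) / n); it can extend
            # outside [0, 1] and collapses to zero length for k = 0 or k = n.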
midpoint = p
halflength = kappa * np.sqrt(p * (1.0 - p) / n)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
elif interval == "jeffreys" or interval == "flat":
from scipy.special import betaincinv
if interval == "jeffreys":
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1.0 - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1.0 - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.0
elif k == n:
upperbound = 1.0
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError(f"Unrecognized interval: {interval:s}")
return conf_interval
def binned_binom_proportion(
x, success, bins=10, range=None, confidence_level=0.68269, interval="wilson"
):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : sequence
Values.
success : sequence of bool
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalar, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
confidence_level : float, optional
Must be in range [0, 1].
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
``bin_ctr + bins_halfwidth`` give the left and right side of each bin,
respectively.
p : ndarray
Efficiency in each bin.
perr : ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
Notes
-----
This function requires ``scipy`` for all interval types.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError("sizes of x and success must match")
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.0
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(
k, n, confidence_level=confidence_level, interval=interval
)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
def _check_poisson_conf_inputs(sigma, background, confidence_level, name):
if sigma != 1:
raise ValueError(f"Only sigma=1 supported for interval {name}")
if background != 0:
raise ValueError(f"background not supported for interval {name}")
if confidence_level is not None:
raise ValueError(f"confidence_level not supported for interval {name}")
def poisson_conf_interval(
n, interval="root-n", sigma=1, background=0, confidence_level=None
):
r"""Poisson parameter confidence interval given observed counts
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
confidence_level : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group `recommends
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also discusses several
possibilities but concludes that no single representation is
suitable for all cases. The suggestion has also been `floated
<https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error
bars should be attached to theoretical predictions instead of
observed data, which this function will not help with (but it's
easy; then you really should use the square root of the theoretical
prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as `explained
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by
the CDF working group). It also has the nice feature that if your
theory curve touches an endpoint of the interval, then your data
point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The `documentation
<https://cxc.harvard.edu/sherpa4.4/statistics/#chigehrels>`_ claims
it is based on a numerical approximation published in `Gehrels
(1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it
does not actually appear there. It is symmetrical, and while the
upper limits are within about 1% of those given by
'frequentist-confidence', the lower limits can be badly wrong. The
interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
`Maxwell (2011)
<https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further
details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See `Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further
details.
These formulas implement a positive, uniform prior.
`Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this
choice in more detail and show that the problem is relatively
insensitive to the choice of prior.
This function has an optional dependency: either `Scipy
<https://www.scipy.org/>`_ or `mpmath <http://mpmath.org/>`_ needs
to be available (Scipy works only for N < 100).
This code is numerically very intensive, which makes it much slower than
the other methods, in particular for large count numbers (above 1000
even with ``mpmath``). Fortunately, some of the other methods or a
Gaussian approximation usually work well in this regime.
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(
... np.arange(10), interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(
... 7, interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(
... 10, background=1.5, confidence_level=0.95,
... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]])
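As an added illustration (the count value here is arbitrary), the
symmetric 'sherpagehrels' approximation for a single measurement of
5 counts, shown rounded to four decimals:
>>> ci = poisson_conf_interval(5, interval='sherpagehrels')
>>> [float(round(x, 4)) for x in ci]
[1.6021, 8.3979]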
""" # noqa: E501
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == "root-n":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
elif interval == "root-n-0":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == "pearson":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array(
[n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25)]
)
elif interval == "sherpagehrels":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75), n + 1 + np.sqrt(n + 0.75)])
elif interval == "frequentist-confidence":
_check_poisson_conf_inputs(1.0, background, confidence_level, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array(
[
0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha),
]
)
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == "kraft-burrows-nousek":
# Deprecation warning in Python 3.9 when N is float, so we force int,
# see https://github.com/astropy/astropy/issues/10832
if np.isscalar(n):
if not isinstance(n, int):
raise TypeError("Number of counts must be integer.")
elif not issubclass(n.dtype.type, np.integer):
raise TypeError("Number of counts must be integer.")
if confidence_level is None:
raise ValueError(
f"Set confidence_level for method {interval}. (sigma is ignored.)"
)
confidence_level = np.asanyarray(confidence_level)
if np.any(confidence_level <= 0) or np.any(confidence_level >= 1):
raise ValueError("confidence_level must be a number between 0 and 1.")
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError("Background must be >= 0.")
conf_interval = np.vectorize(_kraft_burrows_nousek, cache=True)(
n, background, confidence_level
)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}")
return conf_interval
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the MADs are computed. The default
(`None`) is to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.default_rng(12345)
>>> mad = median_absolute_deviation(rand.standard_normal(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.6829504282771885
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 why using np.ma.median
# for normal arrays should not be done (summary: np.ma.median always
# returns a masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median # drops units if result is NaN
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (
isinstance(data, u.Quantity)
and func is np.median
and data_median.ndim == 0
and np.isnan(data_median)
):
data_median = data.__array_wrap__(data_median)
# broadcast the median array before subtraction
if axis is not None:
data_median = np.expand_dims(data_median, axis=axis)
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (
isinstance(data, u.Quantity)
and func is np.median
and result.ndim == 0
and np.isnan(result)
):
result = data.__array_wrap__(result)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the robust standard deviations are
computed. The default (`None`) is to compute the robust
standard deviation of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.default_rng(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
1.984147963351707
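As a purely illustrative sanity check, the estimator is by construction
the median absolute deviation scaled by 1.482602218505602, so the ratio
of the two statistics recovers that constant:
>>> from astropy.stats import median_absolute_deviation
>>> data = rand.normal(5, 2, 1000)
>>> float(round(mad_std(data) / median_absolute_deviation(data), 6))
1.482602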
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
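Examples
--------
A rough illustration with arbitrary, made-up values (1000 s exposure,
1 e-/s from the source, 0.2 e-/s/pixel sky, 0.01 e-/s/pixel dark
current, 5 e- read noise, and a 10 pixel aperture); the numbers are
not meant to represent any particular instrument:
>>> from astropy.stats import signal_to_noise_oir_ccd
>>> snr = signal_to_noise_oir_ccd(1000., 1., 0.2, 0.01, 5., 10.)
>>> float(round(snr, 2))
17.28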
"""
signal = t * source_eps * gain
noise = np.sqrt(
t * (source_eps * gain + npix * (sky_eps * gain + dark_eps)) + npix * rd**2
)
return signal / noise
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
def _scipy_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires :mod:`~scipy`. This implementation will cause Overflow Errors for
about N > 100 (the exact limit depends on details of how scipy was
compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an
implementation that is slower, but can deal with arbitrarily high numbers
since it is based on the `mpmath <http://mpmath.org/>`_ library.
"""
from math import exp
from scipy.integrate import quad
from scipy.optimize import brentq
from scipy.special import factorial
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
return 1.0 / (exp(-B) * np.sum(np.power(B, n) / factorial(n)))
# The parameters of eqn8 do not vary between calls so we can calculate the
# result once and reuse it. The same is True for the factorial of N.
# eqn7 is called hundred times so "caching" these values yields a
# significant speedup (factor 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest to integrate from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
Treat S_max as the optimization parameter in func and then
calculate the matching S_min such that eqn7(S_max) =
eqn7(S_min).
"""
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.0
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <http://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
"""
from mpmath import exp, factorial, findroot, fsum, mpf, power, quad
# We convert these values to float. Because for some reason,
# mpmath.mpf cannot convert from numpy.int64
N = mpf(float(N))
B = mpf(float(B))
CL = mpf(float(CL))
tol = 1e-4
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1.0 / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest to integrate from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
Treat S_max as the optimization parameter in func and then
calculate the matching S_min such that eqn7(S_max) =
eqn7(S_min).
"""
y_S_max = eqn7(S_max, N, B)
# If B > N, then N-B, the "most probable" values is < 0
# and thus s_min is certainly 0.
# Note: For small N, s_max is also close to 0 and root finding
# might find the wrong root, thus it is important to handle this
# case here and return the analytical answer (s_min = 0).
if (B >= N) or (eqn7(0, N, B) >= y_S_max):
return 0.0
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, [0.0, N - B], solver="ridder", tol=tol)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
# Several numerical problems were found to prevent the solvers from finding
# the roots unless the starting values are very close to the final values.
# Thus, this primitive, time-wasting, brute-force stepping here to get
# an interval that can be fed into the ridder solver.
s_max_guess = max(N - B, 1.0)
while func(s_max_guess) < 0:
s_max_guess += 1
S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver="ridder", tol=tol)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
This function has an optional dependency: either :mod:`scipy` or `mpmath
<http://mpmath.org/>`_ needs to be available. (Scipy only works for
N < 100).
"""
from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError("Need mpmath package for input numbers this large.")
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError("Either scipy or mpmath are required.")
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.240, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
"""
try:
from scipy.special import comb, factorial
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import comb, factorial
if D < 0.0 or D > 2.0:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2.0 / N:
return 1.0 - factorial(N) * (D - 1.0 / N) ** (N - 1)
elif D < 3.0 / N:
k = -(N * D - 1.0) / 2.0
r = np.sqrt(k**2 - (N * D - 2.0) ** 2 / 2.0)
a, b = -k + r, -k - r
return 1 - (
factorial(N - 1)
* (b ** (N - 1) * (1 - a) - a ** (N - 1) * (1 - b))
/ N ** (N - 2)
/ (b - a)
)
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.0) / (2.0 * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y ** (t - 3) * (
y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2
)
term1 = comb(N, t)
term2 = (1 - D - t / N) ** (N - t - 1)
# term1 is formally finite, but is approximated by numpy as np.inf for
# large values, so we set them to zero manually when they would be
# multiplied by zero anyway
term1[(term1 == np.inf) & (term2 == 0)] = 0.0
final_term = Tt * term1 * term2
return final_term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (
ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)
).sum()
return S1 - 8 * D / 3 * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
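Examples
--------
A minimal sketch testing a random sample against the default uniform
CDF; no output is asserted because the exact values depend on the
random draw:
>>> import numpy as np
>>> from astropy.stats import kuiper
>>> rng = np.random.default_rng(0)
>>> D, fpp = kuiper(rng.uniform(size=100))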
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = np.amax(cdfv - np.arange(N) / float(N)) + np.amax(
(np.arange(N) + 1) / float(N) - cdfv
)
return D, kuiper_false_positive_probability(D, N)
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
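Examples
--------
A minimal sketch comparing two samples drawn from the same
distribution; no output is asserted because the exact values depend
on the random draws:
>>> import numpy as np
>>> from astropy.stats import kuiper_two
>>> rng = np.random.default_rng(42)
>>> D, fpp = kuiper_two(rng.uniform(size=100), rng.uniform(size=100))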
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
(n1,) = data1.shape
(n2,) = data2.shape
common_type = np.find_common_type([], [data1.dtype, data2.dtype])
if not (
np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)
):
raise ValueError("kuiper_two only accepts real inputs")
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError("kuiper_two only accepts non-nan inputs")
D = _stats.ks_2samp(np.asarray(data1, common_type), np.asarray(data2, common_type))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of (3,) tuple
For each tuple (ai,bi,wi); ai and bi are the limits of the interval,
and wi is the weight to apply to the interval.
Returns
-------
breaks : (N,) array of float
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : (N-1,) array of float
The ith element is the sum, over all input intervals, of the number
of times the subinterval (breaks[i], breaks[i+1]) is covered by that
input interval, multiplied by the weight of that input interval.
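Examples
--------
A small illustration (the interval values are arbitrary): a single
block of weight 1 spanning (0.2, 1.4) wraps around, so after folding
modulo 1 the subinterval (0.2, 0.4) is covered twice:
>>> breaks, weights = fold_intervals([(0.2, 1.4, 1.0)])
>>> weights.tolist()
[1.0, 2.0, 1.0]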
"""
r = []
breaks = set()
tot = 0
for a, b, wt in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.0)
breaks.add(1.0)
breaks = sorted(breaks)
breaks_map = {f: i for (i, f) in enumerate(breaks)}
totals = np.zeros(len(breaks) - 1)
totals += tot
for a, b, wt in r:
totals[breaks_map[a] : breaks_map[b]] += wt
return np.array(breaks), totals
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : (N,) array of float
The boundaries of successive intervals.
totals : (N-1,) array of float
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by breaks, weights
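Examples
--------
A small sketch using intervals folded with ``fold_intervals`` (the
same arbitrary example interval as above); the endpoint values are
fixed by the normalization:
>>> breaks, totals = fold_intervals([(0.2, 1.4, 1.0)])
>>> f = cdf_from_intervals(breaks, totals)
>>> float(f(0.0)), float(f(1.0))
(0.0, 1.0)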
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError("Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : (float, float)
The two intervals, (interval 1, interval 2).
Returns
-------
l : float
The length of the overlap between the two intervals.
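Examples
--------
For instance, the intervals (0, 3) and (2, 5) overlap over (2, 3),
while (0, 3) and (4, 5) do not overlap at all:
>>> interval_overlap_length((0., 3.), (2., 5.))
1.0
>>> interval_overlap_length((0., 3.), (4., 5.))
0.0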
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.0
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : (N,) array of float
Endpoints of the intervals in the PDF
totals : (N-1,) array of float
Probability densities in each bin
Returns
-------
h : array of float
The average weight for each bin
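Examples
--------
A small illustrative case: a piecewise-constant weight of 1 on
(0, 0.5) and 3 on (0.5, 1), averaged over two equal bins:
>>> h = histogram_intervals(2, np.array([0., 0.5, 1.]), np.array([1., 3.]))
>>> h.tolist()
[1.0, 3.0]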
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n, float(j + 1) / n), (start, end))
h[j] += ol / (1.0 / n) * totals[i]
start = end
return h
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import fnmatch
import os
import re
import sys
import numpy as np
from astropy import log
from astropy.utils.console import Getch, color_print, conf, terminal_size
from astropy.utils.data_info import dtype_info_name
__all__ = []
def default_format_func(format_, val):
if isinstance(val, bytes):
return val.decode("utf-8", errors="replace")
else:
return str(val)
# The first three functions are helpers for _auto_format_func
def _use_str_for_masked_values(format_func):
"""Wrap format function to trap masked values.
String format functions and most user functions will not be able to deal
with masked values, so we wrap them to ensure they are passed to str().
"""
return lambda format_, val: (
str(val) if val is np.ma.masked else format_func(format_, val)
)
def _possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
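Examples
--------
For instance (illustrative only), the second yielded function applies
new-style formatting:
>>> funcs = list(_possible_string_format_functions('{:5.2f}'))
>>> funcs[1]('{:5.2f}', 3.14159)
' 3.14'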
"""
yield lambda format_, val: format(val, format_)
yield lambda format_, val: format_.format(val)
yield lambda format_, val: format_ % val
yield lambda format_, val: format_.format(**{k: val[k] for k in val.dtype.names})
def get_auto_format_func(
col=None, possible_string_format_functions=_possible_string_format_functions
):
"""
Return a wrapped ``auto_format_func`` function which is used in
formatting table columns. This is primarily an internal function but
gets used directly in other parts of astropy, e.g. `astropy.io.ascii`.
Parameters
----------
col : object, optional
Column object; used to cache the chosen format function in
``col.info._format_funcs``. Default is None.
possible_string_format_functions : func, optional
Function that yields possible string formatting functions
(defaults to internal function to do this).
Returns
-------
Wrapped ``auto_format_func`` function
"""
def _auto_format_func(format_, val):
"""Format ``val`` according to ``format_`` for a plain format specifier,
old- or new-style format strings, or using a user supplied function.
More importantly, determine and cache (in _format_funcs) a function
that will do this subsequently. In this way this complicated logic is
only done for the first value.
Returns the formatted value.
"""
if format_ is None:
return default_format_func(format_, val)
if format_ in col.info._format_funcs:
return col.info._format_funcs[format_](format_, val)
if callable(format_):
format_func = lambda format_, val: format_(val) # noqa: E731
try:
out = format_func(format_, val)
if not isinstance(out, str):
raise ValueError(
"Format function for value {} returned {} "
"instead of string type".format(val, type(val))
)
except Exception as err:
# For a masked element, the format function call likely failed
# to handle it. Just return the string representation for now,
# and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
raise ValueError(f"Format function for value {val} failed.") from err
# If the user-supplied function handles formatting masked elements, use
# it directly. Otherwise, wrap it in a function that traps them.
try:
format_func(format_, np.ma.masked)
except Exception:
format_func = _use_str_for_masked_values(format_func)
else:
# For a masked element, we cannot set string-based format functions yet,
# as all tests below will fail. Just return the string representation
# of masked for now, and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
for format_func in possible_string_format_functions(format_):
try:
# Does this string format method work?
out = format_func(format_, val)
# Require that the format statement actually did something.
if out == format_:
raise ValueError("the format passed in did nothing.")
except Exception:
continue
else:
break
else:
# None of the possible string functions passed muster.
raise ValueError(
f"unable to parse format string {format_} for its column."
)
# String-based format functions will fail on masked elements;
# wrap them in a function that traps them.
format_func = _use_str_for_masked_values(format_func)
col.info._format_funcs[format_] = format_func
return out
return _auto_format_func
def _get_pprint_include_names(table):
"""Get the set of names to show in pprint from the table pprint_include_names
and pprint_exclude_names attributes.
These may be fnmatch unix-style globs.
"""
def get_matches(name_globs, default):
match_names = set()
if name_globs: # For None or () use the default
for name in table.colnames:
for name_glob in name_globs:
if fnmatch.fnmatch(name, name_glob):
match_names.add(name)
break
else:
match_names.update(default)
return match_names
include_names = get_matches(table.pprint_include_names(), table.colnames)
exclude_names = get_matches(table.pprint_exclude_names(), [])
return include_names - exclude_names
class TableFormatter:
@staticmethod
def _get_pprint_size(max_lines=None, max_width=None):
"""Get the output size (number of lines and character width) for Column and
Table pformat/pprint methods.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be determined
using the ``astropy.table.conf.max_lines`` configuration item. If a
negative value of ``max_lines`` is supplied then there is no line
limit applied.
The same applies for max_width except the configuration item is
``astropy.table.conf.max_width``.
Parameters
----------
max_lines : int or None
Maximum lines of output (header + data rows)
max_width : int or None
Maximum width (characters) output
Returns
-------
max_lines, max_width : int
"""
# Declare to keep static type checker happy.
lines = None
width = None
if max_lines is None:
max_lines = conf.max_lines
if max_width is None:
max_width = conf.max_width
if max_lines is None or max_width is None:
lines, width = terminal_size()
if max_lines is None:
max_lines = lines
elif max_lines < 0:
max_lines = sys.maxsize
if max_lines < 8:
max_lines = 8
if max_width is None:
max_width = width
elif max_width < 0:
max_width = sys.maxsize
if max_width < 10:
max_width = 10
return max_lines, max_width
def _pformat_col(
self,
col,
max_lines=None,
show_name=True,
show_unit=None,
show_dtype=False,
show_length=None,
html=False,
align=None,
):
"""Return a list of formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
html : bool
Output column as HTML
align : str
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively.
Returns
-------
lines : list
List of lines with formatted column values
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
if show_unit is None:
show_unit = col.info.unit is not None
outs = {} # Some values from _pformat_col_iter iterator that are needed here
col_strs_iter = self._pformat_col_iter(
col,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
show_length=show_length,
outs=outs,
)
# Replace tab and newline with text representations so they display nicely.
# Newline in particular is a problem in a multicolumn table.
col_strs = [
val.replace("\t", "\\t").replace("\n", "\\n") for val in col_strs_iter
]
if len(col_strs) > 0:
col_width = max(len(x) for x in col_strs)
if html:
from astropy.utils.xml.writer import xml_escape
n_header = outs["n_header"]
for i, col_str in enumerate(col_strs):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = "th" if i < n_header else "td"
val = f"<{td}>{xml_escape(col_str.strip())}</{td}>"
row = "<tr>" + val + "</tr>"
if i < n_header:
row = "<thead>" + row + "</thead>"
col_strs[i] = row
if n_header > 0:
# Get rid of '---' header line
col_strs.pop(n_header - 1)
col_strs.insert(0, "<table>")
col_strs.append("</table>")
# Now bring all the column string values to the same fixed width
else:
col_width = max(len(x) for x in col_strs) if col_strs else 1
# Center line header content and generate dashed headerline
for i in outs["i_centers"]:
col_strs[i] = col_strs[i].center(col_width)
if outs["i_dashes"] is not None:
col_strs[outs["i_dashes"]] = "-" * col_width
# Format columns according to alignment. `align` arg has precedent, otherwise
# use `col.format` if it starts as a legal alignment string. If neither applies
# then right justify.
re_fill_align = re.compile(r"(?P<fill>.?)(?P<align>[<^>=])")
match = None
if align:
# If there is an align specified then it must match
match = re_fill_align.match(align)
if not match:
raise ValueError(
"column align must be one of '<', '^', '>', or '='"
)
elif isinstance(col.info.format, str):
# col.info.format need not match, in which case rjust gets used
match = re_fill_align.match(col.info.format)
if match:
fill_char = match.group("fill")
align_char = match.group("align")
if align_char == "=":
if fill_char != "0":
raise ValueError("fill character must be '0' for '=' align")
# str.zfill gets used which does not take fill char arg
fill_char = ""
else:
fill_char = ""
align_char = ">"
justify_methods = {"<": "ljust", "^": "center", ">": "rjust", "=": "zfill"}
justify_method = justify_methods[align_char]
justify_args = (col_width, fill_char) if fill_char else (col_width,)
for i, col_str in enumerate(col_strs):
col_strs[i] = getattr(col_str, justify_method)(*justify_args)
if outs["show_length"]:
col_strs.append(f"Length = {len(col)} rows")
return col_strs, outs
def _name_and_structure(self, name, dtype, sep=" "):
"""Format a column name, including a possible structure.
Normally, just returns the name, but if it has a structured dtype,
will add the parts in between square brackets. E.g.,
"name [f0, f1]" or "name [f0[sf0, sf1], f1]".
"""
if dtype is None or dtype.names is None:
return name
structure = ", ".join(
[
self._name_and_structure(name, dt, sep="")
for name, (dt, _) in dtype.fields.items()
]
)
return f"{name}{sep}[{structure}]"
def _pformat_col_iter(
self,
col,
max_lines,
show_name,
show_unit,
outs,
show_dtype=False,
show_length=None,
):
"""Iterator which yields formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
outs : dict
Must be a dict which is used to pass back additional values
defined within the iterator.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
"""
max_lines, _ = self._get_pprint_size(max_lines, -1)
dtype = getattr(col, "dtype", None)
multidims = getattr(col, "shape", [0])[1:]
if multidims:
multidim0 = tuple(0 for n in multidims)
multidim1 = tuple(n - 1 for n in multidims)
multidims_all_ones = np.prod(multidims) == 1
multidims_has_zero = 0 in multidims
i_dashes = None
i_centers = [] # Line indexes where content should be centered
n_header = 0
if show_name:
i_centers.append(n_header)
# Get column name (or 'None' if not set)
col_name = str(col.info.name)
n_header += 1
yield self._name_and_structure(col_name, dtype)
if show_unit:
i_centers.append(n_header)
n_header += 1
yield str(col.info.unit or "")
if show_dtype:
i_centers.append(n_header)
n_header += 1
if dtype is not None:
col_dtype = dtype_info_name((dtype, multidims))
else:
col_dtype = col.__class__.__qualname__ or "object"
yield col_dtype
if show_unit or show_name or show_dtype:
i_dashes = n_header
n_header += 1
yield "---"
max_lines -= n_header
n_print2 = max_lines // 2
n_rows = len(col)
# This block of code is responsible for producing the function that
# will format values for this column. The ``format_func`` function
# takes two args (col_format, val) and returns the string-formatted
# version. Some points to understand:
#
# - col_format could itself be the formatting function, so it will
# actually end up being called with itself as the first arg. In
# this case the function is expected to ignore its first arg.
#
# - auto_format_func is a function that gets called on the first
# column value that is being formatted. It then determines an
# appropriate formatting function given the actual value to be
# formatted. This might be deterministic or it might involve
# try/except. The latter allows for different string formatting
# options like %f or {:5.3f}. When auto_format_func is called it:
# 1. Caches the function in the _format_funcs dict so for subsequent
# values the right function is called right away.
# 2. Returns the formatted value.
#
# - possible_string_format_functions is a function that yields a
# succession of functions that might successfully format the
# value. There is a default, but Mixin methods can override this.
# See Quantity for an example.
#
# - get_auto_format_func() returns a wrapped version of auto_format_func
# with the column id and possible_string_format_functions as
# enclosed variables.
col_format = col.info.format or getattr(col.info, "default_format", None)
pssf = (
getattr(col.info, "possible_string_format_functions", None)
or _possible_string_format_functions
)
auto_format_func = get_auto_format_func(col, pssf)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
if len(col) > max_lines:
if show_length is None:
show_length = True
i0 = n_print2 - (1 if show_length else 0)
i1 = n_rows - n_print2 - max_lines % 2
indices = np.concatenate(
[np.arange(0, i0 + 1), np.arange(i1 + 1, len(col))]
)
else:
i0 = -1
indices = np.arange(len(col))
def format_col_str(idx):
if multidims:
# Prevents columns like Column(data=[[(1,)],[(2,)]], name='a')
# with shape (n,1,...,1) from being printed as if there was
# more than one element in a row
if multidims_all_ones:
return format_func(col_format, col[(idx,) + multidim0])
elif multidims_has_zero:
# Any zero dimension means there is no data to print
return ""
else:
left = format_func(col_format, col[(idx,) + multidim0])
right = format_func(col_format, col[(idx,) + multidim1])
return f"{left} .. {right}"
else:
return format_func(col_format, col[idx])
# Add formatted values if within bounds allowed by max_lines
for idx in indices:
if idx == i0:
yield "..."
else:
try:
yield format_col_str(idx)
except ValueError:
raise ValueError(
'Unable to parse format string "{}" for entry "{}" '
'in column "{}"'.format(col_format, col[idx], col.info.name)
)
outs["show_length"] = show_length
outs["n_header"] = n_header
outs["i_centers"] = i_centers
outs["i_dashes"] = i_dashes
def _pformat_table(
self,
table,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
tableclass=None,
align=None,
):
"""Return a list of lines for the formatted string representation of
the table.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(table)
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
none
align : str or list or tuple
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively. A list of strings can be
provided for alignment of tables with multiple columns.
Returns
-------
rows : list
Formatted table as a list of strings
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
# "Print" all the values into temporary lists by column for subsequent
# use and to determine the width
max_lines, max_width = self._get_pprint_size(max_lines, max_width)
if show_unit is None:
show_unit = any(col.info.unit for col in table.columns.values())
# Coerce align into a correctly-sized list of alignments (if possible)
n_cols = len(table.columns)
if align is None or isinstance(align, str):
align = [align] * n_cols
elif isinstance(align, (list, tuple)):
if len(align) != n_cols:
raise ValueError(
"got {} alignment values instead of "
"the number of columns ({})".format(len(align), n_cols)
)
else:
raise TypeError(
"align keyword must be str or list or tuple (got {})".format(
type(align)
)
)
# Process column visibility from table pprint_include_names and
# pprint_exclude_names attributes and get the set of columns to show.
pprint_include_names = _get_pprint_include_names(table)
cols = []
outs = None # Initialize so static type checker is happy
for align_, col in zip(align, table.columns.values()):
if col.info.name not in pprint_include_names:
continue
lines, outs = self._pformat_col(
col,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
align=align_,
)
if outs["show_length"]:
lines = lines[:-1]
cols.append(lines)
if not cols:
return ["<No columns>"], {"show_length": False}
# Use the values for the last column since they are all the same
n_header = outs["n_header"]
n_rows = len(cols[0])
def outwidth(cols):
return sum(len(c[0]) for c in cols) + len(cols) - 1
dots_col = ["..."] * n_rows
middle = len(cols) // 2
while outwidth(cols) > max_width:
if len(cols) == 1:
break
if len(cols) == 2:
cols[1] = dots_col
break
if cols[middle] is dots_col:
cols.pop(middle)
middle = len(cols) // 2
cols[middle] = dots_col
# Now "print" the (already-stringified) column values into a
# row-oriented list.
rows = []
if html:
from astropy.utils.xml.writer import xml_escape
if tableid is None:
tableid = f"table{id(table)}"
if tableclass is not None:
if isinstance(tableclass, list):
tableclass = " ".join(tableclass)
rows.append(f'<table id="{tableid}" class="{tableclass}">')
else:
rows.append(f'<table id="{tableid}">')
for i in range(n_rows):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = "th" if i < n_header else "td"
vals = (f"<{td}>{xml_escape(col[i].strip())}</{td}>" for col in cols)
row = "<tr>" + "".join(vals) + "</tr>"
if i < n_header:
row = "<thead>" + row + "</thead>"
rows.append(row)
rows.append("</table>")
else:
for i in range(n_rows):
row = " ".join(col[i] for col in cols)
rows.append(row)
return rows, outs
def _more_tabcol(
self,
tabcol,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
):
"""Interactive "more" of a table or column.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
allowed_keys = "f br<>qhpn"
# Count the header lines
n_header = 0
if show_name:
n_header += 1
if show_unit:
n_header += 1
if show_dtype:
n_header += 1
if show_name or show_unit or show_dtype:
n_header += 1
# Set up kwargs for pformat call. Only Table gets max_width.
kwargs = dict(
max_lines=-1,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
if hasattr(tabcol, "columns"): # tabcol is a table
kwargs["max_width"] = max_width
# If max_lines is None (=> query screen size) then increase by 2.
# This is because get_pprint_size leaves 6 extra lines so that in
# ipython you normally see the last input line.
max_lines1, max_width = self._get_pprint_size(max_lines, max_width)
if max_lines is None:
max_lines1 += 2
delta_lines = max_lines1 - n_header
# Set up a function to get a single character on any platform
inkey = Getch()
i0 = 0 # First table/column row to show
showlines = True
while True:
i1 = i0 + delta_lines # Last table/col row to show
if showlines: # Don't always show the table (e.g. after help)
try:
os.system("cls" if os.name == "nt" else "clear")
except Exception:
pass # No worries if clear screen call fails
lines = tabcol[i0:i1].pformat(**kwargs)
colors = (
"red" if i < n_header else "default" for i in range(len(lines))
)
for color, line in zip(colors, lines):
color_print(line, color)
showlines = True
print()
print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=" ")
# Get a valid key
while True:
try:
key = inkey().lower()
except Exception:
print("\n")
log.error(
"Console does not support getting a character"
" as required by more(). Use pprint() instead."
)
return
if key in allowed_keys:
break
print(key)
if key.lower() == "q":
break
elif key == " " or key == "f":
i0 += delta_lines
elif key == "b":
i0 = i0 - delta_lines
elif key == "r":
pass
elif key == "<":
i0 = 0
elif key == ">":
i0 = len(tabcol)
elif key == "p":
i0 -= 1
elif key == "n":
i0 += 1
elif key == "h":
showlines = False
print(
"""
Browsing keys:
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help""",
end=" ",
)
if i0 < 0:
i0 = 0
if i0 >= len(tabcol) - delta_lines:
i0 = len(tabcol) - delta_lines
print("\n")
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" This module contains functions to determine where configuration and
data/cache files used by Astropy should be placed.
"""
import os
import shutil
import sys
from functools import wraps
__all__ = ["get_config_dir", "get_cache_dir", "set_temp_config", "set_temp_cache"]
def _find_home():
"""Locates and return the home directory (or best approximation) on this
system.
Raises
------
OSError
If the home directory cannot be located - usually means you are running
Astropy on some obscure platform that doesn't have standard home
directories.
"""
try:
homedir = os.path.expanduser("~")
except Exception:
# Linux, Unix, AIX, OS X
if os.name == "posix":
if "HOME" in os.environ:
homedir = os.environ["HOME"]
else:
raise OSError(
"Could not find unix home directory to search for "
"astropy config dir"
)
elif os.name == "nt": # This is for all modern Windows (NT or after)
if "MSYSTEM" in os.environ and os.environ.get("HOME"):
# Likely using an msys shell; use whatever it is using for its
# $HOME directory
homedir = os.environ["HOME"]
# See if there's a local home
elif "HOMEDRIVE" in os.environ and "HOMEPATH" in os.environ:
homedir = os.path.join(os.environ["HOMEDRIVE"], os.environ["HOMEPATH"])
# Maybe a user profile?
elif "USERPROFILE" in os.environ:
homedir = os.path.join(os.environ["USERPROFILE"])
else:
try:
import winreg as wreg
shell_folders = r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" # noqa: E501
key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders)
homedir = wreg.QueryValueEx(key, "Personal")[0]
key.Close()
except Exception:
# As a final possible resort, see if HOME is present
if "HOME" in os.environ:
homedir = os.environ["HOME"]
else:
raise OSError(
"Could not find windows home directory to "
"search for astropy config dir"
)
else:
# for other platforms, try HOME, although it probably isn't there
if "HOME" in os.environ:
homedir = os.environ["HOME"]
else:
raise OSError(
"Could not find a home directory to search for "
"astropy config dir - are you on an unsupported "
"platform?"
)
return homedir
def get_config_dir(rootname="astropy"):
"""
Determines the package configuration directory name and creates the
directory if it doesn't exist.
This directory is typically ``$HOME/.astropy/config``, but if the
XDG_CONFIG_HOME environment variable is set and the
``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Parameters
----------
rootname : str
Name of the root configuration directory. For example, if ``rootname =
'pkgname'``, the configuration directory would be ``<home>/.pkgname/``
rather than ``<home>/.astropy`` (depending on platform).
Returns
-------
configdir : str
The absolute path to the configuration directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_config, that overrides all
if set_temp_config._temp_path is not None:
xch = set_temp_config._temp_path
config_path = os.path.join(xch, rootname)
if not os.path.exists(config_path):
os.mkdir(config_path)
return os.path.abspath(config_path)
# first look for XDG_CONFIG_HOME
xch = os.environ.get("XDG_CONFIG_HOME")
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, rootname)
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_root_dir("config", linkto, rootname))
def get_cache_dir(rootname="astropy"):
"""
Determines the Astropy cache directory name and creates the directory if it
doesn't exist.
This directory is typically ``$HOME/.astropy/cache``, but if the
XDG_CACHE_HOME environment variable is set and the
``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Parameters
----------
rootname : str
Name of the root cache directory. For example, if
``rootname = 'pkgname'``, the cache directory will be
``<cache>/.pkgname/``.
Returns
-------
cachedir : str
The absolute path to the cache directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_cache, that overrides all
if set_temp_cache._temp_path is not None:
xch = set_temp_cache._temp_path
cache_path = os.path.join(xch, rootname)
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return os.path.abspath(cache_path)
# first look for XDG_CACHE_HOME
xch = os.environ.get("XDG_CACHE_HOME")
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, rootname)
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_root_dir("cache", linkto, rootname))
class _SetTempPath:
_temp_path = None
_default_path_getter = None
def __init__(self, path=None, delete=False):
if path is not None:
path = os.path.abspath(path)
self._path = path
self._delete = delete
self._prev_path = self.__class__._temp_path
def __enter__(self):
self.__class__._temp_path = self._path
try:
return self._default_path_getter("astropy")
except Exception:
self.__class__._temp_path = self._prev_path
raise
def __exit__(self, *args):
self.__class__._temp_path = self._prev_path
if self._delete and self._path is not None:
shutil.rmtree(self._path)
def __call__(self, func):
"""Implements use as a decorator."""
@wraps(func)
def wrapper(*args, **kwargs):
with self:
func(*args, **kwargs)
return wrapper
class set_temp_config(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy config, primarily
for use with testing.
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the config path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy config
files, or create them if they do not already exist. If None, this
restores the config path to the user's default config path as returned
by `get_config_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_config_dir)
def __enter__(self):
# Special case for the config case, where we need to reset all the
# cached config objects. We do keep the cache, since some of it
# may have been set programmatically rather than be stored in the
# config file (e.g., iers.conf.auto_download=False for our tests).
from .configuration import _cfgobjs
self._cfgobjs_copy = _cfgobjs.copy()
_cfgobjs.clear()
return super().__enter__()
def __exit__(self, *args):
from .configuration import _cfgobjs
_cfgobjs.clear()
_cfgobjs.update(self._cfgobjs_copy)
del self._cfgobjs_copy
super().__exit__(*args)
class set_temp_cache(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy download cache,
primarily for use with testing (though there may be other applications
for setting a different cache directory, for example to switch to a cache
dedicated to large files).
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the cache path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy cache
files, or create them if they do not already exist. If None, this
restores the cache path to the user's default cache path as returned
by `get_cache_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_cache_dir)
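# Illustrative usage sketch (not part of the upstream module): both classes
# can be used as context managers or as decorators. The temporary directory
# below comes from ``tempfile`` purely for the example; note that the
# decorator wrapper defined above does not propagate the return value of the
# wrapped function.
def _example_temp_paths():
    import tempfile

    with tempfile.TemporaryDirectory() as tmpdir:
        # All config reads/writes inside the block go to ``tmpdir/astropy``.
        with set_temp_config(tmpdir):
            assert get_config_dir().startswith(tmpdir)

        # Equivalent decorator form, here redirecting the download cache:
        @set_temp_cache(tmpdir)
        def check_cache():
            assert get_cache_dir().startswith(tmpdir)

        check_cache()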
def _find_or_create_root_dir(dirnm, linkto, pkgname="astropy"):
innerdir = os.path.join(_find_home(), f".{pkgname}")
maindir = os.path.join(_find_home(), f".{pkgname}", dirnm)
if not os.path.exists(maindir):
# first create .astropy dir if needed
if not os.path.exists(innerdir):
try:
os.mkdir(innerdir)
except OSError:
if not os.path.isdir(innerdir):
raise
elif not os.path.isdir(innerdir):
raise OSError(
f"Intended {pkgname} {dirnm} directory {maindir} is actually a file."
)
try:
os.mkdir(maindir)
except OSError:
if not os.path.isdir(maindir):
raise
if (
not sys.platform.startswith("win")
and linkto is not None
and not os.path.exists(linkto)
):
os.symlink(maindir, linkto)
elif not os.path.isdir(maindir):
raise OSError(
f"Intended {pkgname} {dirnm} directory {maindir} is actually a file."
)
return os.path.abspath(maindir)
|
de49f31d7e443fea5156487bb78b8d99a0b0e7e1e367392a207f9470d230c48e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers", are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy-to-extend
framework that allows users to easily create new fitters by combining
statistics with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses NumPy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
# pylint: disable=invalid-name
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
from importlib.metadata import entry_points
import numpy as np
from astropy.units import Quantity
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyUserWarning
from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex
from .spline import ( # noqa: F401
SplineExactKnotsFitter,
SplineInterpolateFitter,
SplineSmoothingFitter,
SplineSplrepFitter,
)
from .statistic import leastsquare
from .utils import _combine_equivalency_dict, poly_map_domain
__all__ = [
"LinearLSQFitter",
"LevMarLSQFitter",
"TRFLSQFitter",
"DogBoxLSQFitter",
"LMLSQFitter",
"FittingWithOutlierRemoval",
"SLSQPLSQFitter",
"SimplexLSQFitter",
"JointFitter",
"Fitter",
"ModelLinearityError",
"ModelsError",
]
# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]
class NonFiniteValueError(RuntimeError):
"""
Error raised when attempting to use a non-finite value.
"""
class Covariance:
"""Class for covariance matrix calculated by fitter."""
def __init__(self, cov_matrix, param_names):
self.cov_matrix = cov_matrix
self.param_names = param_names
def pprint(self, max_lines, round_val):
# Print and label lower triangle of covariance matrix
# Print rows for params up to `max_lines`, round floats to 'round_val'
longest_name = max(len(x) for x in self.param_names)
ret_str = "parameter variances / covariances \n"
fstring = f'{"": <{longest_name}}| {{0}}\n'
for i, row in enumerate(self.cov_matrix):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += fstring.replace(" " * len(param), param, 1).format(
repr(np.round(row[: i + 1], round_val))[7:-2]
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, params):
# index covariance matrix by parameter names or indices
if len(params) != 2:
raise ValueError("Covariance must be indexed by two values.")
if all(isinstance(item, str) for item in params):
i1, i2 = self.param_names.index(params[0]), self.param_names.index(
params[1]
)
elif all(isinstance(item, int) for item in params):
i1, i2 = params
else:
raise TypeError(
"Covariance can be indexed by two parameter names or integer indices."
)
return self.cov_matrix[i1][i2]
class StandardDeviations:
"""Class for fitting uncertainties."""
def __init__(self, cov_matrix, param_names):
self.param_names = param_names
self.stds = self._calc_stds(cov_matrix)
def _calc_stds(self, cov_matrix):
# Sometimes scipy lstsq returns nonsensical negative values in the
# diagonals of the cov_x it computes.
stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)]
return stds
def pprint(self, max_lines, round_val):
longest_name = max(len(x) for x in self.param_names)
ret_str = "standard deviations\n"
for i, std in enumerate(self.stds):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += (
f"{param}{' ' * (longest_name - len(param))}| "
f"{np.round(std, round_val)}\n"
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, param):
if isinstance(param, str):
i = self.param_names.index(param)
elif isinstance(param, int):
i = param
else:
raise TypeError(
"Standard deviation can be indexed by parameter name or integer."
)
return self.stds[i]
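# Illustrative sketch (not part of the upstream module): these containers are
# attached to a model as ``model.cov_matrix`` and ``model.stds`` by fitters
# constructed with ``calc_uncertainties=True`` (see the fitters below). They
# can be indexed by parameter name or integer index.
def _example_uncertainty_containers():
    import numpy as np

    cov_matrix = np.array([[0.04, 0.01], [0.01, 0.09]])
    cov = Covariance(cov_matrix, ["amplitude", "mean"])
    stds = StandardDeviations(cov_matrix, ["amplitude", "mean"])
    assert cov["amplitude", "mean"] == 0.01
    assert abs(stds["mean"] - 0.3) < 1e-12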
class ModelsError(Exception):
"""Base class for model exceptions"""
class ModelLinearityError(ModelsError):
"""Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith("_"):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop("equivalencies", None)
data_has_units = (
isinstance(x, Quantity)
or isinstance(y, Quantity)
or isinstance(z, Quantity)
)
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies
)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(
model.input_units[model.inputs[0]],
equivalencies=input_units_equivalencies[model.inputs[0]],
)
if isinstance(y, Quantity) and z is not None:
y = y.to(
model.input_units[model.inputs[1]],
equivalencies=input_units_equivalencies[model.inputs[1]],
)
# Create a dictionary mapping the real model inputs and outputs
# names to the data. This remapping of names must be done here, after
# the input data is converted to the correct units.
rename_data = {model.inputs[0]: x}
if z is not None:
rename_data[model.outputs[0]] = z
rename_data[model.inputs[1]] = y
else:
rename_data[model.outputs[0]] = y
rename_data["z"] = None
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
# input units (to make sure that initial guesses on the parameters
# are in the right unit system)
model = model.without_units_for_data(**rename_data)
if isinstance(model, tuple):
rename_data["_left_kwargs"] = model[1]
rename_data["_right_kwargs"] = model[2]
model = model[0]
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(**rename_data)
return model_new
else:
raise NotImplementedError(
"This model does not support being fit to data with units."
)
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
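# Illustrative sketch (not part of the upstream module): because fitter
# ``__call__`` methods are wrapped with ``fitter_unit_support``, models that
# support unit fitting can be fit directly to Quantity data; units are
# stripped before the optimizer runs and restored on the returned parameters.
# The particular model/fitter combination below is an assumption for the
# example, not a requirement of the decorator.
def _example_fit_with_units():
    import numpy as np
    from astropy import units as u
    from astropy.modeling import models

    x = np.linspace(1.0, 5.0, 50) * u.um
    y = 3.0 * np.exp(-0.5 * (x.value - 2.5) ** 2 / 0.4**2) * u.Jy

    g_init = models.Gaussian1D(
        amplitude=2.0 * u.Jy, mean=2.0 * u.um, stddev=0.5 * u.um
    )
    g_fit = TRFLSQFitter()(g_init, x, y)
    return g_fit.mean.quantity  # the parameter comes back as a Quantity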
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
"""
supported_constraints = []
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific to
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@staticmethod
def _add_fitting_uncertainties(*args):
"""
When available, calculates and sets the parameter covariance matrix
(model.cov_matrix) and standard deviations (model.stds).
"""
return None
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
"""
supported_constraints = ["fixed"]
supports_masked_input = True
def __init__(self, calc_uncertainties=False):
self.fit_info = {
"residuals": None,
"rank": None,
"singular_values": None,
"params": None,
}
self._calc_uncertainties = calc_uncertainties
@staticmethod
def _is_invertible(m):
"""Check if inverse of matrix can be obtained."""
if m.shape[0] != m.shape[1]:
return False
if np.linalg.matrix_rank(m) < m.shape[0]:
return False
return True
def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None, resids=None):
"""
Calculate the parameter covariance matrix and standard deviations,
and set the `cov_matrix` and `stds` attributes.
"""
x_dot_x_prime = np.dot(a.T, a)
masked = False or hasattr(y, "mask")
# check if invertible. if not, can't calc covariance.
if not self._is_invertible(x_dot_x_prime):
return model
inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime)
if z is None: # 1D models
if len(model) == 1: # single model
mask = None
if masked:
mask = y.mask
xx = np.ma.array(x, mask=mask)
RSS = [(1 / (xx.count() - n_coeff)) * resids]
if len(model) > 1: # model sets
RSS = [] # collect sum residuals squared for each model in set
for j in range(len(model)):
mask = None
if masked:
mask = y.mask[..., j].flatten()
xx = np.ma.array(x, mask=mask)
eval_y = model(xx, model_set_axis=False)
eval_y = np.rollaxis(eval_y, model.model_set_axis)[j]
RSS.append(
(1 / (xx.count() - n_coeff)) * np.sum((y[..., j] - eval_y) ** 2)
)
else: # 2D model
if len(model) == 1:
mask = None
if masked:
warnings.warn(
"Calculation of fitting uncertainties "
"for 2D models with masked values not "
"currently supported.\n",
AstropyUserWarning,
)
return
xx, _ = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask)
# len(xx) instead of xx.count. this will break if values are masked?
RSS = [(1 / (len(xx) - n_coeff)) * resids]
else:
RSS = []
for j in range(len(model)):
eval_z = model(x, y, model_set_axis=False)
mask = None # need to figure out how to deal w/ masking here.
if model.model_set_axis == 1:
# model_set_axis passed when evaluating only refers to input shapes
# so output must be reshaped for model_set_axis=1.
eval_z = np.rollaxis(eval_z, 1)
eval_z = eval_z[j]
RSS.append(
[(1 / (len(x) - n_coeff)) * np.sum((z[j] - eval_z) ** 2)]
)
covs = [inv_x_dot_x_prime * r for r in RSS]
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
if len(covs) == 1:
model.cov_matrix = Covariance(covs[0], model.param_names)
model.stds = StandardDeviations(covs[0], free_param_names)
else:
model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs]
model.stds = [StandardDeviations(cov, free_param_names) for cov in covs]
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, "domain") and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, "window") and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, "x_domain") and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, "y_domain") and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, "x_window") and model.x_window is None:
model.x_window = [-1.0, 1.0]
if hasattr(model, "y_window") and model.y_window is None:
model.y_window = [-1.0, 1.0]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like, optional
Input coordinates.
If the dependent (``y`` or ``z``) coordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
coordinate grids differ.
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError(
"Model is not linear in parameters, "
"linear fit methods should not be used."
)
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
model_copy.sync_constraints = False
_, fitparam_indices, _ = model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(
x, y, z, n_models=len(model_copy), model_set_axis=model_copy.model_set_axis
)
n_fixed = sum(model_copy.fixed.values())
# This is also done by _convert_inputs, but we need it here to allow
# checking the array dimensionality before that gets called:
if weights is not None:
weights = np.asarray(weights, dtype=float)
if n_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [
idx
for idx in range(len(model_copy.param_names))
if idx not in fitparam_indices
]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray(
[
getattr(model_copy, model_copy.param_names[idx]).value
for idx in fixparam_indices
]
)
if len(farg) == 2:
x, y = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, weights = _convert_input(
x,
weights,
n_models=len(model_copy) if weights.ndim == y.ndim else 1,
model_set_axis=model_copy.model_set_axis,
)
# map domain into window
if hasattr(model_copy, "domain"):
x = self._map_domain_window(model_copy, x)
if n_fixed:
lhs = np.asarray(
self._deriv_with_constraints(model_copy, fitparam_indices, x=x)
)
fixderivs = self._deriv_with_constraints(
model_copy, fixparam_indices, x=x
)
else:
lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, _, weights = _convert_input(
x,
y,
weights,
n_models=len(model_copy) if weights.ndim == z.ndim else 1,
model_set_axis=model_copy.model_set_axis,
)
# map domain into window
if hasattr(model_copy, "x_domain"):
x, y = self._map_domain_window(model_copy, x, y)
if n_fixed:
lhs = np.asarray(
self._deriv_with_constraints(model_copy, fitparam_indices, x=x, y=y)
)
fixderivs = self._deriv_with_constraints(
model_copy, fixparam_indices, x=x, y=y
)
else:
lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
if weights is not None:
# Same for weights
if weights.ndim > 2:
# Separate 2D weights for each model:
weights = np.rollaxis(weights, model_axis, weights.ndim)
weights = weights.reshape(-1, weights.shape[-1])
elif weights.ndim == z.ndim:
# Separate, flattened weights for each model:
weights = weights.T if model_axis == 0 else weights
else:
# Common weights for all the models:
weights = weights.flatten()
else:
rhs = z.flatten()
if weights is not None:
weights = weights.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if np.asanyarray(lhs).ndim > 2:
raise ValueError(
f"{type(model_copy).__name__} gives unsupported >2D "
"derivative matrix for this x/y"
)
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if n_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input coordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
if rhs.ndim == 2:
if weights.shape == rhs.shape:
# separate weights for multiple models case: broadcast
# lhs to have more dimension (for each model)
lhs = lhs[..., np.newaxis] * weights[:, np.newaxis]
rhs = rhs * weights
else:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original
# dependent variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if weights is not None and not masked and np.any(np.isnan(lhs)):
raise ValueError(
"Found NaNs in the coefficient matrix, which "
"should not happen and would crash the lapack "
"routine. Maybe check that weights are not null."
)
a = None # needed for calculating covariance
if (masked and len(model_copy) > 1) or (
weights is not None and weights.ndim > 1
):
# Separate masks or weights for multiple models case: Numpy's
# lstsq supports multiple dimensions only for rhs, so we need to
# loop manually on the models. This may be fixed in the future
# with https://github.com/numpy/numpy/pull/15777.
# Initialize empty array of coefficients and populate it one model
# at a time. The shape matches the number of coefficients from the
# Vandermonde matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype)
# Arrange the lhs as a stack of 2D matrices that we can iterate
# over to get the correctly-orientated lhs for each model:
if lhs.ndim > 2:
lhs_stack = np.rollaxis(lhs, -1, 0)
else:
lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask if masked else slice(None)
model_lhs = model_lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
a = model_lhs
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(
model_lhs, model_rhs, rcond
)
model_lacoef[:] = t_coef.T
else:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
a = lhs[good]
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good], rhs[good], rcond)
self.fit_info["residuals"] = resids
self.fit_info["rank"] = rank
self.fit_info["singular_values"] = sval
lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl
self.fit_info["params"] = lacoef
fitter_to_model_params(model_copy, lacoef.flatten())
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if (
hasattr(model_copy, "_order")
and len(model_copy) == 1
and rank < (model_copy._order - n_fixed)
):
warnings.warn("The fit may be poorly conditioned\n", AstropyUserWarning)
# calculate and set covariance matrix and standard devs. on model
if self._calc_uncertainties:
if len(y) > len(lacoef):
self._add_fitting_uncertainties(
model_copy, a * scl, len(lacoef), x, y, z, resids
)
model_copy.sync_constraints = True
return model_copy
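# Illustrative sketch (not part of the upstream module): a minimal linear fit
# of a Polynomial1D model, with ``calc_uncertainties=True`` so that the
# ``cov_matrix``/``stds`` attributes described above are populated.
def _example_linear_lsq_fit():
    import numpy as np
    from astropy.modeling import models

    rng = np.random.default_rng(0)
    x = np.linspace(-5.0, 5.0, 100)
    y = 1.0 + 2.0 * x + 0.5 * x**2 + rng.normal(0.0, 0.1, x.size)

    p_init = models.Polynomial1D(degree=2)
    fitter = LinearLSQFitter(calc_uncertainties=True)
    p_fit = fitter(p_init, x, y)
    # Fitted coefficients (c0, c1, c2) and their standard deviations:
    return p_fit.parameters, p_fit.stds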
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a maximum number of iterations ``niter``, outliers are
removed and fitting is performed for each iteration, until no new outliers
are found or ``niter`` is reached.
Parameters
----------
fitter : `Fitter`
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : callable
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int, optional
Maximum number of iterations.
outlier_kwargs : dict, optional
Keyword arguments for outlier_func.
Attributes
----------
fit_info : dict
The ``fit_info`` (if any) from the last iteration of the wrapped
``fitter`` during the most recent fit. An entry is also added with the
keyword ``niter`` that records the actual number of fitting iterations
performed (as opposed to the user-specified maximum).
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
self.fit_info = {"niter": None}
def __str__(self):
return (
f"Fitter: {self.fitter.__class__.__name__}\n"
f"Outlier function: {self.outlier_func.__name__}\n"
f"Num. of iterations: {self.niter}\n"
f"Outlier func. args.: {self.outlier_kwargs}"
)
def __repr__(self):
return (
f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, "
f"outlier_func: {self.outlier_func.__name__},"
f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})"
)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like, optional
Data measurements (2D case).
weights : array-like, optional
Weights to be passed to the fitter.
kwargs : dict, optional
Keyword arguments to be passed to the fitter.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behavior and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y coordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if (
not hasattr(self.fitter, "supports_masked_input")
or self.fitter.supports_masked_input is not True
):
raise ValueError(
f"{type(self.fitter).__name__} cannot fit model sets with masked "
"values"
)
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input coordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = (x,)
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if "axis" not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs["axis"] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
last_n_masked = filtered_data.mask.sum()
n = 0 # (allow recording no. of iterations when 0)
# Perform the iterative fitting:
for n in range(1, self.niter + 1):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop("axis", None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True,
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask, model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(
data_T, mask_T, model_vals_T
):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn(
"outlier_func did not accept axis argument; "
"reverted to slow loop over models.",
AstropyUserWarning,
)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(
fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights,
**kwargs,
)
else:
fitted_model = self.fitter(
fitted_model,
*coords,
filtered_data,
weights=filtered_weights,
**kwargs,
)
# Stop iteration if the masked points are no longer changing (with
# cumulative rejection we only need to compare how many there are):
this_n_masked = filtered_data.mask.sum() # (minimal overhead)
if this_n_masked == last_n_masked:
break
last_n_masked = this_n_masked
self.fit_info = {"niter": n}
self.fit_info.update(getattr(self.fitter, "fit_info", {}))
return fitted_model, filtered_data.mask
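# Illustrative sketch (not part of the upstream module): combining a fitter
# with sigma clipping to reject outliers iteratively. ``sigma_clip`` from
# `astropy.stats` is the usual choice of ``outlier_func``; the ``sigma``
# keyword below is forwarded through ``outlier_kwargs``.
def _example_fitting_with_outlier_removal():
    import numpy as np
    from astropy.modeling import models
    from astropy.stats import sigma_clip

    rng = np.random.default_rng(0)
    x = np.linspace(0.0, 10.0, 200)
    y = 3.0 + 0.5 * x + rng.normal(0.0, 0.2, x.size)
    y[::25] += 10.0  # inject a few strong outliers

    fitter = FittingWithOutlierRemoval(
        LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0
    )
    fitted_line, outlier_mask = fitter(models.Linear1D(), x, y)
    # ``outlier_mask`` is True for the points rejected during fitting.
    return fitted_line, outlier_mask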
class _NonLinearLSQFitter(metaclass=_FitterMeta):
"""
Base class for Non-Linear least-squares fitters
Parameters
----------
calc_uncertainties : bool
If True, the covariance matrix will be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If True, the parameter bounds set on the model will be enforced for each
parameter while fitting, via a simple min/max condition.
Default: True
"""
supported_constraints = ["fixed", "tied", "bounds"]
"""
The constraint types supported by this fitter type.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=True):
self.fit_info = None
self._calc_uncertainties = calc_uncertainties
self._use_min_max_bounds = use_min_max_bounds
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
fitter_to_model_params(model, fps, self._use_min_max_bounds)
meas = args[-1]
if weights is None:
value = np.ravel(model(*args[2:-1]) - meas)
else:
value = np.ravel(weights * (model(*args[2:-1]) - meas))
if not np.all(np.isfinite(value)):
raise NonFiniteValueError(
"Objective function has encountered a non-finite value, "
"this will cause the fit to fail!\n"
"Please remove non-finite values from your input data before "
"fitting to avoid this error."
)
return value
@staticmethod
def _add_fitting_uncertainties(model, cov_matrix):
"""
Set ``cov_matrix`` and ``stds`` attributes on model with parameter
covariance matrix returned by ``optimize.leastsq``.
"""
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
model.cov_matrix = Covariance(cov_matrix, free_param_names)
model.stds = StandardDeviations(cov_matrix, free_param_names)
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array(
[np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)]
)
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars], True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
fit_deriv = np.array(model.fit_deriv(x, *params))
try:
output = np.array(
[np.ravel(_) for _ in np.array(weights) * fit_deriv]
)
if output.shape != fit_deriv.shape:
output = np.array(
[np.ravel(_) for _ in np.atleast_2d(weights).T * fit_deriv]
)
return output
except ValueError:
return np.array(
[
np.ravel(_)
for _ in np.array(weights) * np.moveaxis(fit_deriv, -1, 0)
]
).transpose()
else:
if not model.col_fit_deriv:
return [
np.ravel(_)
for _ in (
np.ravel(weights)
* np.array(model.fit_deriv(x, y, *params)).T
).T
]
return [
np.ravel(_)
for _ in weights * np.array(model.fit_deriv(x, y, *params))
]
def _compute_param_cov(self, model, y, init_values, cov_x, fitparams, farg):
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg) ** 2)
dof = len(y) - len(init_values)
self.fit_info["param_cov"] = cov_x * sum_sqrs / dof
else:
self.fit_info["param_cov"] = None
if self._calc_uncertainties is True:
if self.fit_info["param_cov"] is not None:
self._add_fitting_uncertainties(model, self.fit_info["param_cov"])
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
return None, None, None
def _filter_non_finite(self, x, y, z=None):
"""
Filter out non-finite values in x, y, z.
Returns
-------
x, y, z : ndarrays
x, y, and z with non-finite values filtered out.
"""
MESSAGE = "Non-Finite input data has been removed by the fitter."
if z is None:
mask = np.isfinite(y)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
return x[mask], y[mask], None
else:
mask = np.isfinite(z)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
return x[mask], y[mask], z[mask]
@fitter_unit_support
def __call__(
self,
model,
x,
y,
z=None,
weights=None,
maxiter=DEFAULT_MAXITER,
acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS,
estimate_jacobian=False,
filter_non_finite=False,
):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
filter_non_finite : bool, optional
Whether or not to filter data with non-finite values. Default is False
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self.supported_constraints)
model_copy.sync_constraints = False
if filter_non_finite:
x, y, z = self._filter_non_finite(x, y, z)
farg = (
model_copy,
weights,
) + _convert_input(x, y, z)
init_values, fitparams, cov_x = self._run_fitter(
model_copy, farg, maxiter, acc, epsilon, estimate_jacobian
)
self._compute_param_cov(model_copy, y, init_values, cov_x, fitparams, farg)
model_copy.sync_constraints = True
return model_copy
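# Illustrative sketch (not part of the upstream module): non-linear fitters
# raise NonFiniteValueError when NaN/inf values reach the objective function;
# passing ``filter_non_finite=True`` removes those points first (with a
# warning). The choice of TRFLSQFitter below is just for the example.
def _example_filter_non_finite():
    import numpy as np
    from astropy.modeling import models

    x = np.linspace(-3.0, 3.0, 50)
    y = 2.0 * np.exp(-0.5 * x**2 / 0.8**2)
    y[10] = np.nan  # a bad pixel

    g_init = models.Gaussian1D(amplitude=1.5, mean=0.0, stddev=1.0)
    g_fit = TRFLSQFitter()(g_init, x, y, filter_non_finite=True)
    return g_fit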
class LevMarLSQFitter(_NonLinearLSQFitter):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If True, the covariance matrix will be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
Additionally, one additional element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
"""
def __init__(self, calc_uncertainties=False):
super().__init__(calc_uncertainties)
self.fit_info = {
"nfev": None,
"fvec": None,
"fjac": None,
"ipvt": None,
"qtf": None,
"message": None,
"ierr": None,
"param_jac": None,
"param_cov": None,
}
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
if model.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _, _ = model_to_fit_params(model)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function,
init_values,
args=farg,
Dfun=dfunc,
col_deriv=model.col_fit_deriv,
maxfev=maxiter,
epsfcn=epsilon,
xtol=acc,
full_output=True,
)
fitter_to_model_params(model, fitparams)
self.fit_info.update(dinfo)
self.fit_info["cov_x"] = cov_x
self.fit_info["message"] = mess
self.fit_info["ierr"] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn(
"The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning,
)
return init_values, fitparams, cov_x
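# Illustrative sketch (not part of the upstream module): a basic non-linear
# fit with LevMarLSQFitter. With ``calc_uncertainties=True`` the parameter
# covariance ends up both in ``fit_info['param_cov']`` and on the model as
# ``cov_matrix``/``stds``.
def _example_levmar_fit():
    import numpy as np
    from astropy.modeling import models

    rng = np.random.default_rng(0)
    x = np.linspace(-5.0, 5.0, 200)
    y = 4.0 * np.exp(-0.5 * (x - 1.0) ** 2 / 0.9**2)
    y += rng.normal(0.0, 0.05, x.size)

    g_init = models.Gaussian1D(amplitude=3.0, mean=0.5, stddev=1.0)
    fitter = LevMarLSQFitter(calc_uncertainties=True)
    g_fit = fitter(g_init, x, y)
    return g_fit, fitter.fit_info["param_cov"], g_fit.stds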
class _NLLSQFitter(_NonLinearLSQFitter):
"""
Wrapper class for `scipy.optimize.least_squares` method, which provides:
- Trust Region Reflective
- dogbox
- Levenberg-Marquardt
algorithms using the least squares statistic.
Parameters
----------
method : str
‘trf’ : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
‘dogbox’ : dogleg algorithm with rectangular trust regions, typical
use case is small problems with bounds. Not recommended for
problems with rank-deficient Jacobian.
‘lm’ : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn’t handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
calc_uncertainties : bool
If True, the covariance matrix will be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If True, the parameter bounds set on the model will be enforced for each
parameter while fitting, via a simple min/max condition. A True setting
will replicate how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info : `scipy.optimize.OptimizeResult`
The `scipy.optimize.OptimizeResult` instance from the most recent fit,
containing all of the fit information.
"""
def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__(calc_uncertainties, use_min_max_bounds)
self._method = method
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
from scipy.linalg import svd
if model.fit_deriv is None or estimate_jacobian:
dfunc = "2-point"
else:
def _dfunc(params, model, weights, x, y, z=None):
if model.col_fit_deriv:
return np.transpose(
self._wrap_deriv(params, model, weights, x, y, z)
)
else:
return self._wrap_deriv(params, model, weights, x, y, z)
dfunc = _dfunc
init_values, _, bounds = model_to_fit_params(model)
# Note: if use_min_max_bounds is True we default to enforcing bounds
# using the old method employed by LevMarLSQFitter. This is different
# from the method that optimize.least_squares uses to enforce bounds,
# so we override the bounds passed to optimize.least_squares so that
# it will not enforce any bounding.
if self._use_min_max_bounds:
bounds = (-np.inf, np.inf)
self.fit_info = optimize.least_squares(
self.objective_function,
init_values,
args=farg,
jac=dfunc,
max_nfev=maxiter,
diff_step=np.sqrt(epsilon),
xtol=acc,
method=self._method,
bounds=bounds,
)
# Adapted from ~scipy.optimize.minpack, see:
# https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(self.fit_info.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[: s.size]
cov_x = np.dot(VT.T / s**2, VT)
fitter_to_model_params(model, self.fit_info.x, False)
if not self.fit_info.success:
warnings.warn(
f"The fit may be unsuccessful; check: \n {self.fit_info.message}",
AstropyUserWarning,
)
return init_values, self.fit_info.x, cov_x
class TRFLSQFitter(_NLLSQFitter):
"""
Trust Region Reflective algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If True, the covariance matrix will be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If True, the parameter bounds set on the model will be enforced for each
parameter while fitting, via a simple min/max condition. A True setting
will replicate how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info : `scipy.optimize.OptimizeResult`
The `scipy.optimize.OptimizeResult` instance from the most recent fit,
containing all of the fit information.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("trf", calc_uncertainties, use_min_max_bounds)
class DogBoxLSQFitter(_NLLSQFitter):
"""
DogBox algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If True, the covariance matrix will be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If True, the parameter bounds set on the model will be enforced for each
parameter while fitting, via a simple min/max condition. A True setting
will replicate how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info : `scipy.optimize.OptimizeResult`
The `scipy.optimize.OptimizeResult` instance from the most recent fit,
containing all of the fit information.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("dogbox", calc_uncertainties, use_min_max_bounds)
class LMLSQFitter(_NLLSQFitter):
"""
`scipy.optimize.least_squares` Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If True, the covariance matrix will be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info : `scipy.optimize.OptimizeResult`
The `scipy.optimize.OptimizeResult` instance from the most recent fit,
containing all of the fit information.
"""
def __init__(self, calc_uncertainties=False):
super().__init__("lm", calc_uncertainties, True)
class SLSQPLSQFitter(Fitter):
"""
Sequential Least Squares Programming (SLSQP) optimization algorithm and
least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
Notes
-----
See also the `~astropy.modeling.optimizers.SLSQP` optimizer.
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
`ModelLinearityError`
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
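Examples
--------
A minimal sketch (synthetic data invented for illustration): fit two
Gaussians to two data sets while forcing them to share a common
``stddev``; the fitted models are updated in place::
    import numpy as np
    from astropy.modeling import models, fitting
    x1 = np.linspace(-2, 4, 100)
    y1 = 10 * np.exp(-0.5 * ((x1 - 1) / 0.5) ** 2)
    x2 = np.linspace(0, 6, 100)
    y2 = 5 * np.exp(-0.5 * ((x2 - 3) / 0.5) ** 2)
    g1 = models.Gaussian1D(amplitude=10, mean=1, stddev=0.4)
    g2 = models.Gaussian1D(amplitude=5, mean=3, stddev=0.6)
    jf = fitting.JointFitter(models=[g1, g2],
                             jointparameters={g1: ['stddev'], g2: ['stddev']},
                             initvals=[0.5])
    jf(x1, y1, x2, y2)  # g1 and g2 now hold the jointly fitted parameters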
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self.model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = model.parameters.tolist()
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]["slice"]
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
the fitted parameters - result of one iteration of the
fitting algorithm
args : tuple
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[: model.n_inputs + 1]
del lstsqargs[: model.n_inputs + 1]
# separate each model separately fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]["slice"]
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError(f"Expected >1 models, {len(self.models)} is given")
if len(self.jointparams.keys()) < 2:
raise TypeError(
"At least two parameters are expected, "
f"{len(self.jointparams.keys())} is given"
)
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError(
f"{len(self.jointparams[j])} parameter(s) "
f"provided but {len(self.initvals)} expected"
)
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError(
f"Expected {reduce(lambda x, y: x + 1 + y + 1, self.modeldims)} "
f"coordinates in args but {len(args)} provided"
)
self.fitparams[:], _ = optimize.leastsq(
self.objective_function, self.fitparams, args=args
)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]["slice"]
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
data_ndim, data_shape = z.ndim, z.shape
else:
data_ndim, data_shape = y.ndim, y.shape
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1 or data_ndim > x.ndim:
if (model_set_axis or 0) >= data_ndim:
raise ValueError("model_set_axis out of range")
if data_shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y or z array) is expected to equal "
"the number of parameter sets"
)
if z is None:
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
data_shape = y.shape[:-1]
else:
# Shape of z excluding model_set_axis
data_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1 :]
if z is None:
if data_shape != x.shape:
raise ValueError("x and y should have the same shape")
farg = (x, y)
else:
if not (x.shape == y.shape == data_shape):
raise ValueError("x, y and z should have the same shape")
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def fitter_to_model_params(model, fps, use_min_max_bounds=True):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
Parameters
----------
model :
The model being fit
fps :
The fit parameter values to be assigned
use_min_max_bounds: bool
If the set parameter bounds for the model will be enforced on each
parameter with bounds.
Default: True
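Examples
--------
A minimal sketch: push a vector of fitted values back onto a model whose
``mean`` is held fixed, so ``fps`` carries only the two free parameters::
    from astropy.modeling import models
    from astropy.modeling.fitting import fitter_to_model_params
    g = models.Gaussian1D(amplitude=1, mean=5, stddev=1, fixed={'mean': True})
    fitter_to_model_params(g, [2.0, 0.3])
    # amplitude -> 2.0, stddev -> 0.3, while the fixed mean stays at 5.0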
"""
_, fit_param_indices, _ = model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
parameters = model.parameters
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]["slice"]
shape = param_metrics[name]["shape"]
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset : offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None) and use_min_max_bounds:
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
parameters[slice_] = values
offset += size
# Update model parameters before calling ``tied`` constraints.
model._array_to_parameters()
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
# better to change this at some point)
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]["slice"]
# To handle multiple tied constraints, model parameters
# need to be updated after each iteration.
parameters[slice_] = value
model._array_to_parameters()
@deprecated("5.1", "private method: _fitter_to_model_params has been made public now")
def _fitter_to_model_params(model, fps):
return fitter_to_model_params(model, fps)
def model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
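Returns
-------
tuple
    The free parameter values, their indices into ``model.param_names``,
    and a ``(lower_bounds, upper_bounds)`` pair in which `None` has been
    replaced by -inf/+inf.
Examples
--------
A minimal sketch with a fixed ``mean``::
    from astropy.modeling import models
    from astropy.modeling.fitting import model_to_fit_params
    g = models.Gaussian1D(amplitude=1, mean=5, stddev=1, fixed={'mean': True})
    values, indices, bounds = model_to_fit_params(g)
    # values  -> array([1., 1.])   (amplitude and stddev only)
    # indices -> [0, 2]
    # bounds  -> ((-inf, -inf), (inf, inf))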
"""
fitparam_indices = list(range(len(model.param_names)))
model_params = model.parameters
model_bounds = list(model.bounds.values())
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model_params)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]["slice"]
del params[slice_]
del model_bounds[slice_]
del fitparam_indices[idx]
model_params = np.array(params)
for idx, bound in enumerate(model_bounds):
if bound[0] is None:
lower = -np.inf
else:
lower = bound[0]
if bound[1] is None:
upper = np.inf
else:
upper = bound[1]
model_bounds[idx] = (lower, upper)
model_bounds = tuple(zip(*model_bounds))
return model_params, fitparam_indices, model_bounds
@deprecated("5.1", "private method: _model_to_fit_params has been made public now")
def _model_to_fit_params(model):
return model_to_fit_params(model)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = "Optimizer cannot handle {0} constraints."
if any(model.fixed.values()) and "fixed" not in supported_constraints:
raise UnsupportedConstraintError(message.format("fixed parameter"))
if any(model.tied.values()) and "tied" not in supported_constraints:
raise UnsupportedConstraintError(message.format("tied parameter"))
if (
any(tuple(b) != (None, None) for b in model.bounds.values())
and "bounds" not in supported_constraints
):
raise UnsupportedConstraintError(message.format("bound parameter"))
if model.eqcons and "eqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("equality"))
if model.ineqcons and "ineqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("inequality"))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn(
"Model is linear in parameters; consider using linear fitting methods.",
AstropyUserWarning,
)
elif len(model) != 1:
# for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
This provides a means of inserting a fitting routine without requiring
it to be merged into astropy's core.
Parameters
----------
entry_points : list of `~importlib.metadata.EntryPoint`
entry_points are objects which encapsulate importable objects and
are defined on the installation of a package.
Notes
-----
An explanation of entry points can be found `here
<http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
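Examples
--------
A hedged sketch of calling this by hand with a hypothetical plugin class
(normally the entry point is instead declared in the plugin package's
metadata under the ``astropy.modeling`` group; ``my_plugin.fitters:MyFitter``
is purely illustrative)::
    from importlib.metadata import EntryPoint
    ep = EntryPoint(name="MyFitter",
                    value="my_plugin.fitters:MyFitter",
                    group="astropy.modeling")
    populate_entry_points([ep])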
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(
AstropyUserWarning(
f"{type(e).__name__} error occurred in entry point {name}."
)
)
else:
if not inspect.isclass(entry_point):
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to be a Class."
)
)
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to extend "
"astropy.modeling.Fitter"
)
)
def _populate_ep():
# TODO: Exclusively use select when Python minversion is 3.10
ep = entry_points()
if hasattr(ep, "select"):
populate_entry_points(ep.select(group="astropy.modeling"))
else:
populate_entry_points(ep.get("astropy.modeling", []))
_populate_ep()
|
24edb7921cc9d5ac9fdda777c0fc7976fe8c0cc64f0107add564502a4995db74 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
# pylint: disable=invalid-name, protected-access, redefined-outer-name
import abc
import copy
import functools
import inspect
import itertools
import operator
import types
from collections import defaultdict, deque
from inspect import signature
from itertools import chain
import numpy as np
from astropy.nddata.utils import add_array, extract_array
from astropy.table import Table
from astropy.units import Quantity, UnitsError, dimensionless_unscaled
from astropy.units.utils import quantity_asanyarray
from astropy.utils import (
IncompatibleShapeError,
check_broadcast,
find_current_module,
indent,
isiterable,
metadata,
sharedmethod,
)
from astropy.utils.codegen import make_function_with_signature
from .bounding_box import CompoundBoundingBox, ModelBoundingBox
from .parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline
from .utils import (
_combine_equivalency_dict,
_ConstraintsDict,
_SpecialOperatorsDict,
combine_labels,
get_inputs_and_params,
make_binary_operator_eval,
)
__all__ = [
"Model",
"FittableModel",
"Fittable1DModel",
"Fittable2DModel",
"CompoundModel",
"fix_inputs",
"custom_model",
"ModelDefinitionError",
"bind_bounding_box",
"bind_compound_bounding_box",
]
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
"""
return lambda left, right: CompoundModel(oper, left, right, **kwargs)
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions."""
class _ModelMeta(abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
def __new__(mcls, name, bases, members, **kwds):
# See the docstring for _is_dynamic above
if "_is_dynamic" not in members:
members["_is_dynamic"] = mcls._is_dynamic
opermethods = [
("__add__", _model_oper("+")),
("__sub__", _model_oper("-")),
("__mul__", _model_oper("*")),
("__truediv__", _model_oper("/")),
("__pow__", _model_oper("**")),
("__or__", _model_oper("|")),
("__and__", _model_oper("&")),
("_fix_inputs", _model_oper("fix_inputs")),
]
members["_parameters_"] = {
k: v for k, v in members.items() if isinstance(v, Parameter)
}
for opermethod, opercall in opermethods:
members[opermethod] = opercall
cls = super().__new__(mcls, name, bases, members, **kwds)
param_names = list(members["_parameters_"])
# Need to walk each base MRO to collect all parameter names
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
# Preserve order of definitions
param_names = list(tbase._parameters_) + param_names
# Remove duplicates (arising from redefinition in subclass).
param_names = list(dict.fromkeys(param_names))
if cls._parameters_:
if hasattr(cls, "_param_names"):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(param_names)
else:
cls.param_names = tuple(param_names)
return cls
def __init__(cls, name, bases, members, **kwds):
super().__init__(name, bases, members, **kwds)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
pdict = {}
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
for parname, val in cls._parameters_.items():
pdict[parname] = val
cls._handle_special_methods(members, pdict)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith("_abc_"):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ("__init__", "__call__"):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
internal implementation detail (i.e. begins with '_').
"""
return not (cls.__name__.startswith("_") or inspect.isabstract(cls))
def rename(cls, name=None, inputs=None, outputs=None):
"""
Creates a copy of this model class with a new name, inputs or outputs.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class 'astropy.modeling.core.SkyRotation'>
Name: SkyRotation (Rotation2D)
N_inputs: 2
N_outputs: 2
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
if name is None:
name = cls.name
if inputs is None:
inputs = cls.inputs
else:
if not isinstance(inputs, tuple):
raise TypeError("Expected 'inputs' to be a tuple of strings.")
elif len(inputs) != len(cls.inputs):
raise ValueError(f"{cls.name} expects {len(cls.inputs)} inputs")
if outputs is None:
outputs = cls.outputs
else:
if not isinstance(outputs, tuple):
raise TypeError("Expected 'outputs' to be a tuple of strings.")
elif len(outputs) != len(cls.outputs):
raise ValueError(f"{cls.name} expects {len(cls.outputs)} outputs")
new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs})
new_cls.__module__ = modname
new_cls.__qualname__ = name
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get("inverse")
if inverse is None or cls.__bases__[0] is object:
# The latter clause is to prevent the below code from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get("bounding_box")
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = ModelBoundingBox.validate(
cls, bounding_box, _preserve_ignore=True
)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of ModelBoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
f"The bounding_box method for {cls.name} is not correctly "
"defined: If defined as a method all arguments to that "
"method (besides self) must be keyword arguments with "
"default values that can be used to compute a default "
"bounding box."
)
kwargs.append((param.name, param.default))
__call__.__signature__ = sig
return type(
f"{cls.name}ModelBoundingBox", (ModelBoundingBox,), {"__call__": __call__}
)
def _handle_special_methods(cls, members, pdict):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, "__qualname__"):
wrapper.__qualname__ = f"{cls.__qualname__}.{wrapper.__name__}"
if (
"__call__" not in members
and "n_inputs" in members
and isinstance(members["n_inputs"], int)
and members["n_inputs"] > 0
):
# Don't create a custom __call__ for classes that already have one
# explicitly defined (this includes the Model base class, and any
# other classes that manually override __call__
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
# When called, models can take two optional keyword arguments:
#
# * model_set_axis, which indicates (for multi-dimensional input)
# which axis is used to indicate different models
#
# * equivalencies, a dictionary of equivalencies to be applied to
# the input values, where each key should correspond to one of
# the inputs.
#
# The following code creates the __call__ function with these
# two keyword arguments.
args = ("self",)
kwargs = {
"model_set_axis": None,
"with_bounding_box": False,
"fill_value": np.nan,
"equivalencies": None,
"inputs_map": None,
}
new_call = make_function_with_signature(
__call__, args, kwargs, varargs="inputs", varkwargs="new_inputs"
)
# The following makes it look like __call__
# was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if (
"__init__" not in members
and not inspect.isabstract(cls)
and cls._parameters_
):
# Build list of all parameters including inherited ones
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional
# arguments
if all(p.default is not None for p in pdict.values()):
args = ("self",)
kwargs = []
for param_name, param_val in pdict.items():
default = param_val.default
unit = param_val.unit
# If the unit was specified in the parameter but the
# default is not a Quantity, attach the unit to the
# default.
if unit is not None:
default = Quantity(default, unit, copy=False, subok=True)
kwargs.append((param_name, default))
else:
args = ("self",) + tuple(pdict.keys())
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs="kwargs"
)
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper("+")
__sub__ = _model_oper("-")
__mul__ = _model_oper("*")
__truediv__ = _model_oper("/")
__pow__ = _model_oper("**")
__or__ = _model_oper("|")
__and__ = _model_oper("&")
_fix_inputs = _model_oper("fix_inputs")
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif inspect.isabstract(base) or base.__name__.startswith("_"):
break
bases.append(base.name)
if bases:
return f"{cls.name} ({' -> '.join(bases)})"
return cls.name
try:
default_keywords = [
("Name", format_inheritance(cls)),
("N_inputs", cls.n_inputs),
("N_outputs", cls.n_outputs),
]
if cls.param_names:
default_keywords.append(("Fittable parameters", cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append(f"{keyword}: {value}")
return "\n".join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details, and
the short model-set example at the end of the Examples section below.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
sets. Also, ``model_set_axis=False`` can be used to tell that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
`~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
List of functions of length n such that ``ineqcons[j](x0, *args) >=
0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
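A model set, holding several sets of parameter values for the same kind of
model, is created by passing array-valued parameters together with
``n_models`` (a minimal sketch):
>>> gg = models.Gaussian1D(amplitude=[10, 20], mean=[1, 2], stddev=[.2, .3],
...                        n_models=2)
>>> len(gg)
2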
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ("eqcons", "ineqcons")
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
n_inputs = 0
"""The number of inputs."""
n_outputs = 0
""" The number of outputs."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
_has_inverse_bounding_box = False
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
# Covariance matrix can be set by fitter if available.
# If cov_matrix is available, then std will set as well
_cov_matrix = None
_stds = None
def __init_subclass__(cls, **kwargs):
super().__init_subclass__()
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
self._default_inputs_outputs()
if meta is not None:
self.meta = meta
self._name = name
# add parameters to instance level by walking MRO list
mro = self.__class__.__mro__
for cls in mro:
if issubclass(cls, Model):
for parname, val in cls._parameters_.items():
newpar = copy.deepcopy(val)
newpar.model = self
if parname not in self.__dict__:
self.__dict__[parname] = newpar
self._initialize_constraints(kwargs)
kwargs = self._initialize_setters(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_slices()
self._initialize_unit_support()
def _default_inputs_outputs(self):
if self.n_inputs == 1 and self.n_outputs == 1:
self._inputs = ("x",)
self._outputs = ("y",)
elif self.n_inputs == 2 and self.n_outputs == 1:
self._inputs = ("x", "y")
self._outputs = ("z",)
else:
try:
self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs))
self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs))
except TypeError:
# self.n_inputs and self.n_outputs are properties
# This is the case when subclasses of Model do not define
# ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``.
self._inputs = ()
self._outputs = ()
def _initialize_setters(self, kwargs):
"""
This exists to inject defaults for settable properties for models
originating from `custom_model`.
"""
if hasattr(self, "_settable_properties"):
setters = {
name: kwargs.pop(name, default)
for name, default in self._settable_properties.items()
}
for name, value in setters.items():
setattr(self, name, value)
return kwargs
@property
def inputs(self):
return self._inputs
@inputs.setter
def inputs(self, val):
if len(val) != self.n_inputs:
raise ValueError(
f"Expected {self.n_inputs} number of inputs, got {len(val)}."
)
self._inputs = val
self._initialize_unit_support()
@property
def outputs(self):
return self._outputs
@outputs.setter
def outputs(self, val):
if len(val) != self.n_outputs:
raise ValueError(
f"Expected {self.n_outputs} number of outputs, got {len(val)}."
)
self._outputs = val
@property
def n_inputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``inputs`` as class variables is removed.
if hasattr(self.__class__, "n_inputs") and isinstance(
self.__class__.n_inputs, property
):
try:
return len(self.__class__.inputs)
except TypeError:
try:
return len(self.inputs)
except AttributeError:
return 0
return self.__class__.n_inputs
@property
def n_outputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``outputs`` as class variables is removed.
if hasattr(self.__class__, "n_outputs") and isinstance(
self.__class__.n_outputs, property
):
try:
return len(self.__class__.outputs)
except TypeError:
try:
return len(self.outputs)
except AttributeError:
return 0
return self.__class__.n_outputs
def _calculate_separability_matrix(self):
"""
This is a hook which customises the behavior of modeling.separable.
This allows complex subclasses to customise the separability matrix.
If it returns `NotImplemented` the default behavior is used.
"""
return NotImplemented
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {
key: self._input_units_strict for key in self.inputs
}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {
key: self._input_units_allow_dimensionless for key in self.inputs
}
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that parameter.
"""
val = self._input_units_strict
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
val = self._input_units_allow_dimensionless
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def uses_quantity(self):
"""
True if this model has been created with `~astropy.units.Quantity`
objects or if there are no parameters.
This can be used to determine if this model should be evaluated with
`~astropy.units.Quantity` or regular floats.
"""
pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
return (len(pisq) == 0) or any(pisq)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
@staticmethod
def _strip_ones(intup):
return tuple(item for item in intup if item != 1)
def __setattr__(self, attr, value):
if isinstance(self, CompoundModel):
param_names = self._param_names
param_names = self.param_names
if param_names is not None and attr in self.param_names:
param = self.__dict__[attr]
value = _tofloat(value)
if param._validator is not None:
param._validator(self, value)
# check consistency with previous shape and size
eshape = self._param_metrics[attr]["shape"]
if eshape == ():
eshape = (1,)
vshape = np.array(value).shape
if vshape == ():
vshape = (1,)
esize = self._param_metrics[attr]["size"]
if np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones(
eshape
):
raise InputParameterError(
f"Value for parameter {attr} does not match shape or size\nexpected"
f" by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})"
)
if param.unit is None:
if isinstance(value, Quantity):
param._unit = value.unit
param.value = value.value
else:
param.value = value
else:
if not isinstance(value, Quantity):
raise UnitsError(
f"The '{param.name}' parameter should be given as a"
" Quantity because it was originally "
"initialized as a Quantity"
)
param._unit = value.unit
param.value = value.value
else:
if attr in ["fittable", "linear"]:
self.__dict__[attr] = value
else:
super().__setattr__(attr, value)
def _pre_evaluate(self, *args, **kwargs):
"""
Model specific input setup that needs to occur prior to model evaluation
"""
# Broadcast inputs into common size
inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs)
# Setup actual model evaluation method
parameters = self._param_sets(raw=True, units=True)
def evaluate(_inputs):
return self.evaluate(*chain(_inputs, parameters))
return evaluate, inputs, broadcasted_shapes, kwargs
def get_bounding_box(self, with_bbox=True):
"""
Return the ``bounding_box`` of a model if it exists or ``None``
otherwise.
Parameters
----------
with_bbox :
The value of the ``with_bounding_box`` keyword argument
when calling the model. Default is `True` for usage when
looking up the model's ``bounding_box`` without risk of error.
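Examples
--------
A minimal sketch using a model without a default bounding box::
    from astropy.modeling import models
    p = models.Polynomial1D(degree=2)
    p.get_bounding_box()        # None: no default bounding box is defined
    p.bounding_box = (-5, 5)
    p.get_bounding_box()        # a ModelBoundingBox over the interval (-5, 5)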
"""
bbox = None
if not isinstance(with_bbox, bool) or with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
pass
if isinstance(bbox, CompoundBoundingBox) and not isinstance(
with_bbox, bool
):
bbox = bbox[with_bbox]
return bbox
@property
def _argnames(self):
"""The inputs used to determine input_shape for bounding_box evaluation"""
return self.inputs
def _validate_input_shape(
self, _input, idx, argnames, model_set_axis, check_model_set_axis
):
"""
Perform basic validation of a single model input's shape
-- it has the minimum dimensions for the given model_set_axis
Returns the shape of the input if validation succeeds.
"""
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
f"For model_set_axis={model_set_axis}, all inputs must be at "
f"least {model_set_axis + 1}-dimensional."
)
if input_shape[model_set_axis] != self._n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
f"Input argument '{argname}' does not have the correct dimensions"
f" in model_set_axis={model_set_axis} for a model set with"
f" n_models={self._n_models}."
)
return input_shape
def _validate_input_shapes(self, inputs, argnames, model_set_axis):
"""
Perform basic validation of model inputs
--that they are mutually broadcastable and that they have
the minimum dimensions for the given model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = self._n_models > 1 and model_set_axis is not False
all_shapes = []
for idx, _input in enumerate(inputs):
all_shapes.append(
self._validate_input_shape(
_input, idx, argnames, model_set_axis, check_model_set_axis
)
)
input_shape = check_broadcast(*all_shapes)
if input_shape is None:
raise ValueError(
"All inputs must have identical shapes or must be scalars."
)
return input_shape
def input_shape(self, inputs):
"""Get input shape for bounding_box evaluation"""
return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis)
def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox):
"""
Generic model evaluation routine
Selects and evaluates model with or without bounding_box enforcement
"""
# Evaluate the model using the prepared evaluation method either
# enforcing the bounding_box or not.
bbox = self.get_bounding_box(with_bbox)
if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None:
outputs = bbox.evaluate(evaluate, _inputs, fill_value)
else:
outputs = evaluate(_inputs)
return outputs
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
Model specific post evaluation processing of outputs
"""
if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
return outputs
@property
def bbox_with_units(self):
return not isinstance(self, CompoundModel)
def __call__(self, *args, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
# Turn any keyword arguments into positional arguments.
args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs)
# Read model evaluation related parameters
with_bbox = kwargs.pop("with_bounding_box", False)
fill_value = kwargs.pop("fill_value", np.nan)
# prepare for model evaluation (overridden in CompoundModel)
evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate(
*args, **kwargs
)
outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox)
# post-process evaluation results (overridden in CompoundModel)
return self._post_evaluate(
inputs, outputs, broadcasted_shapes, with_bbox, **kwargs
)
def _get_renamed_inputs_as_positional(self, *args, **kwargs):
def _keyword2positional(kwargs):
# Inputs were passed as keyword (not positional) arguments.
# Because the signature of the ``__call__`` is defined at
# the class level, the name of the inputs cannot be changed at
# the instance level and the old names are always present in the
# signature of the method. In order to use the new names of the
# inputs, the old names are taken out of ``kwargs``, the input
# values are sorted in the order of self.inputs and passed as
# positional arguments to ``__call__``.
# These are the keys that are always present as keyword arguments.
keys = [
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
]
new_inputs = {}
# kwargs contain the names of the new inputs + ``keys``
allkeys = list(kwargs.keys())
# Remove the names of the new inputs from kwargs and save them
# to a dict ``new_inputs``.
for key in allkeys:
if key not in keys:
new_inputs[key] = kwargs[key]
del kwargs[key]
return new_inputs, kwargs
n_args = len(args)
new_inputs, kwargs = _keyword2positional(kwargs)
n_all_args = n_args + len(new_inputs)
if n_all_args < self.n_inputs:
raise ValueError(
f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}"
)
elif n_all_args > self.n_inputs:
raise ValueError(
f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}"
)
if n_args == 0:
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
new_args.append(new_inputs[k])
elif n_args != self.n_inputs:
# Some inputs are passed as positional, others as keyword arguments.
args = list(args)
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
if k in new_inputs:
new_args.append(new_inputs[k])
else:
new_args.append(args[0])
del args[0]
else:
new_args = args
return new_args, kwargs
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def model_set_axis(self):
"""
The index of the model set axis--that is, the axis of a parameter array
that indicates which model in the set a parameter value belongs to--as
specified when the model was initialized.
See the documentation on :ref:`astropy:modeling-model-sets`
for more details.
"""
return self._model_set_axis
@property
def param_sets(self):
"""
Return parameters as a pset.
This is a list with one item per parameter set, which is an array of
that parameter's values across all parameter sets, with the last axis
associated with the parameter set.
"""
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
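For example (a minimal sketch)::
    from astropy.modeling import models
    g = models.Gaussian1D(amplitude=1, mean=0, stddev=2)
    g.parameters                      # -> array([1., 0., 2.])
    g.parameters = [3.0, 0.5, 1.0]    # updates amplitude, mean and stddev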
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
self._parameters_to_array()
start = self._param_metrics[self.param_names[0]]["slice"].start
stop = self._param_metrics[self.param_names[-1]]["slice"].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]["slice"].start
stop = self._param_metrics[self.param_names[-1]]["slice"].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
f"parameters array: {e!r}"
)
self._array_to_parameters()
@property
def sync_constraints(self):
"""
This is a boolean property that indicates whether or not accessing constraints
automatically checks the constituent models' current values. It defaults to True
on creation of a model, but for fitting purposes it should be set to False
for performance reasons.
"""
if not hasattr(self, "_sync_constraints"):
self._sync_constraints = True
return self._sync_constraints
@sync_constraints.setter
def sync_constraints(self, value):
if not isinstance(value, bool):
raise ValueError("sync_constraints only accepts True or False as values")
self._sync_constraints = value
@property
def fixed(self):
"""
A ``dict`` mapping parameter names to their fixed constraint.
"""
if not hasattr(self, "_fixed") or self.sync_constraints:
self._fixed = _ConstraintsDict(self, "fixed")
return self._fixed
@property
def bounds(self):
"""
A ``dict`` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples or ``[min, max]`` lists.
"""
if not hasattr(self, "_bounds") or self.sync_constraints:
self._bounds = _ConstraintsDict(self, "bounds")
return self._bounds
@property
def tied(self):
"""
A ``dict`` mapping parameter names to their tied constraint.
"""
if not hasattr(self, "_tied") or self.sync_constraints:
self._tied = _ConstraintsDict(self, "tied")
return self._tied
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._mconstraints["eqcons"]
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._mconstraints["ineqcons"]
def has_inverse(self):
"""
Returns True if the model has an analytic or user
inverse defined.
"""
try:
self.inverse
except NotImplementedError:
return False
return True
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
set with a manually-defined inverse, such as a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
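For example, a user-defined inverse can be attached to (and later removed
from) any model; the linear polynomial and its hand-computed inverse below
are purely illustrative:
>>> from astropy.modeling import models
>>> p = models.Polynomial1D(degree=1, c0=1, c1=2)
>>> p.inverse = models.Polynomial1D(degree=1, c0=-0.5, c1=0.5)
>>> p.has_user_inverse
True
>>> del p.inverse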
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
result = self._inverse()
if result is not NotImplemented:
if not self._has_inverse_bounding_box:
result.bounding_box = None
return result
raise NotImplementedError(
"No analytical or user-supplied inverse transform "
"has been implemented for this model."
)
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
model to have no inverse).
)
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
try:
del self._user_inverse
except AttributeError:
pass
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
@property
def bounding_box(self):
r"""
A `tuple` of length `n_inputs` defining the bounding box limits, or
raise `NotImplementedError` for no bounding_box.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
manually to an array-like object of shape ``(model.n_inputs, 2)``. For
further usage, see :ref:`astropy:bounding-boxes`
The limits are ordered according to the `numpy` ``'C'`` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: No bounding box is defined for this model
(note: the bounding box was explicitly disabled for this model;
use `del model.bounding_box` to restore the default bounding box,
if one is defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model)."
)
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError("No bounding box is defined for this model.")
elif isinstance(self._bounding_box, ModelBoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return ModelBoundingBox.validate(self, self._bounding_box())
else:
# The only other allowed possibility is that it's a ModelBoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), model=self)()
return self._bounding_box(bounding_box, model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
        elif isinstance(bounding_box, (CompoundBoundingBox, dict)):
cls = CompoundBoundingBox
elif isinstance(self._bounding_box, type) and issubclass(
self._bounding_box, ModelBoundingBox
):
cls = self._bounding_box
else:
cls = ModelBoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
def set_slice_args(self, *args):
if isinstance(self._user_bounding_box, CompoundBoundingBox):
self._user_bounding_box.slice_args = args
else:
raise RuntimeError("The bounding_box for this model is not compound")
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def cov_matrix(self):
"""
Fitter should set covariance matrix, if available.
"""
return self._cov_matrix
@cov_matrix.setter
def cov_matrix(self, cov):
self._cov_matrix = cov
unfix_untied_params = [
p
for p in self.param_names
if (self.fixed[p] is False) and (self.tied[p] is False)
]
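        # Translate the diagonal of the covariance matrix into per-parameter
        # standard deviations for the free (neither fixed nor tied)
        # parameters; non-positive variances are recorded as None.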
        if isinstance(cov, list):  # model set
param_stds = []
for c in cov:
param_stds.append(
[np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)]
)
for p, param_name in enumerate(unfix_untied_params):
par = getattr(self, param_name)
par.std = [item[p] for item in param_stds]
setattr(self, param_name, par)
else:
param_stds = [
np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix)
]
for param_name in unfix_untied_params:
par = getattr(self, param_name)
par.std = param_stds.pop(0)
setattr(self, param_name, par)
@property
def stds(self):
"""
Standard deviation of parameters, if covariance matrix is available.
"""
return self._stds
@stds.setter
def stds(self, stds):
self._stds = stds
@property
def separable(self):
"""A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
raise NotImplementedError(
'The "separable" property is not defined for '
f"model {self.__class__.__name__}"
)
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
Return an instance of the model for which the parameter values have
been converted to the right units for the data, then the units have
been stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not
necessarily the units of the input data, but are derived from them.
Model subclasses that want fitting to work in the presence of
quantities need to define a ``_parameter_units_for_data_units`` method
that takes the input and output units (as two dictionaries) and
returns a dictionary giving the target units for each parameter.
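
        Examples
        --------
        A minimal illustration (model, parameter, and data values are chosen
        arbitrarily):

        >>> from astropy.modeling.models import Gaussian1D
        >>> from astropy import units as u
        >>> g = Gaussian1D(amplitude=3 * u.Jy, mean=2 * u.um, stddev=0.5 * u.um)
        >>> g2 = g.without_units_for_data(x=[1, 2] * u.um, y=[1, 2] * u.Jy)
        >>> g2.amplitude.unit is None
        True
        >>> g2.mean.unit is None
        True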
"""
model = self.copy()
inputs_unit = {
inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
for inp in self.inputs
if kwargs[inp] is not None
}
outputs_unit = {
out: getattr(kwargs[out], "unit", dimensionless_unscaled)
for out in self.outputs
if kwargs[out] is not None
}
parameter_units = self._parameter_units_for_data_units(
inputs_unit, outputs_unit
)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
if isinstance(model, CompoundModel):
model.strip_units_from_tree()
return model
def output_units(self, **kwargs):
"""
Return a dictionary of output units for this model given a dictionary
        of fitting inputs and outputs.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
        This method will force extra model evaluations, which may be computationally
expensive. To avoid this, one can add a return_units property to the model,
see :ref:`astropy:models_return_units`.
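
        Examples
        --------
        A small illustration (the model and values are arbitrary):

        >>> from astropy.modeling.models import Gaussian1D
        >>> from astropy import units as u
        >>> g = Gaussian1D(amplitude=3 * u.Jy, mean=2 * u.um, stddev=0.5 * u.um)
        >>> g.output_units(x=[1, 2] * u.um)["y"] == u.Jy
        True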
"""
units = self.return_units
if units is None or units == {}:
inputs = {inp: kwargs[inp] for inp in self.inputs}
values = self(**inputs)
if self.n_outputs == 1:
values = (values,)
units = {
out: getattr(values[index], "unit", dimensionless_unscaled)
for index, out in enumerate(self.outputs)
}
return units
def strip_units_from_tree(self):
for item in self._leaflist:
for parname in item.param_names:
par = getattr(item, parname)
par._set_unit(None, force=True)
def with_units_from_data(self, **kwargs):
"""
Return an instance of the model which has units for which the parameter
values are compatible with the data units specified.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units
of the input data, but are derived from them. Model subclasses that
want fitting to work in the presence of quantities need to define a
``_parameter_units_for_data_units`` method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
"""
model = self.copy()
inputs_unit = {
inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
for inp in self.inputs
if kwargs[inp] is not None
}
outputs_unit = {
out: getattr(kwargs[out], "unit", dimensionless_unscaled)
for out in self.outputs
if kwargs[out] is not None
}
parameter_units = self._parameter_units_for_data_units(
inputs_unit, outputs_unit
)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly,
# hence the call to ``_set_unit``.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
@property
def _has_units(self):
# Returns True if any of the parameters have units
for param in self.param_names:
if getattr(self, param).unit is not None:
return True
else:
return False
@property
def _supports_unit_fitting(self):
# If the model has a ``_parameter_units_for_data_units`` method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, "_parameter_units_for_data_units")
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
        (i.e. a = y - bx).
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
            `Model.bounding_box` is `None`, ``out`` or ``coords`` must be
passed.
Raises
------
ValueError
            If ``coords`` and ``out`` are not given and the `Model.bounding_box`
            of this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
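
        A minimal sketch (the model, bounding box, and array size are chosen
        arbitrarily):

        >>> import numpy as np
        >>> from astropy.modeling.models import Gaussian2D
        >>> model = Gaussian2D(x_mean=5, y_mean=5, x_stddev=1, y_stddev=1)
        >>> model.bounding_box = ((3, 7), (3, 7))
        >>> image = model.render(out=np.zeros((10, 10)))
        >>> bool(image[5, 5] > 0)
        True
        >>> bool(image[0, 0] == 0)  # pixels outside the bounding box are untouched
        True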
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or out must be input.")
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError("inconsistent shape of the output.")
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError(
"the array and model must have the same number of dimensions."
)
if bbox is not None:
# Assures position is at center pixel,
# important when using add_array.
pd = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array(
[extract_array(c, sub_shape, pos) for c in coords]
)
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input out in "
"one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
not be overridden since it will return the input units based on the
annotations.
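
        For example, a model could declare its expected input units through
        annotations on ``evaluate`` (a sketch only; ``SpectralModel`` is an
        illustrative name)::

            from astropy import units as u
            from astropy.modeling import Fittable1DModel, Parameter

            class SpectralModel(Fittable1DModel):
                amplitude = Parameter(default=1)

                @staticmethod
                def evaluate(x: u.um, amplitude):
                    # ``input_units`` will report {'x': u.um} from the annotation
                    return amplitude * x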
"""
if hasattr(self, "_input_units"):
return self._input_units
elif hasattr(self.evaluate, "__annotations__"):
annotations = self.evaluate.__annotations__.copy()
annotations.pop("return", None)
if annotations:
                # If annotations are missing for any input, this will raise a KeyError.
return {name: annotations[name] for name in self.inputs}
else:
# None means any unit is accepted
return None
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the
output of evaluate should be in, and returns a dictionary mapping
outputs to units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, "_return_units"):
return self._return_units
elif hasattr(self.evaluate, "__annotations__"):
return self.evaluate.__annotations__.get("return", None)
else:
# None means any unit is accepted
return None
def _prepare_inputs_single_model(self, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
            # Ensure that array scalars are always upgraded to 1-D arrays for the
# sake of consistency with how parameters work. They will be cast back
# to scalars at the end
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if self.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
f"self input argument {self.inputs[idx]!r} of shape"
f" {input_shape!r} cannot be broadcast with parameter"
f" {param.name!r} of shape {param.shape!r}."
)
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if self.n_outputs > self.n_inputs:
extra_outputs = self.n_outputs - self.n_inputs
if not broadcasts:
                # If there were no inputs then the broadcasts list is empty;
                # just add a None since no broadcasting of outputs and inputs
                # is necessary (see _prepare_outputs_single_model)
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
@staticmethod
def _remove_axes_from_shape(shape, axis):
"""
Given a shape tuple as the first input, construct a new one by removing
        that particular axis from the shape and all preceding axes. Negative axis
        numbers are permitted, where the axis is relative to the last axis.
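
        For example (a sketch of the intended behavior)::

            Model._remove_axes_from_shape((2, 3, 4), 1)    # -> (4,)
            Model._remove_axes_from_shape((2, 3, 4), -1)   # -> (2, 3)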
"""
if len(shape) == 0:
return shape
if axis < 0:
axis = len(shape) + axis
return shape[:axis] + shape[axis + 1 :]
if axis >= len(shape):
axis = len(shape) - 1
shape = shape[axis + 1 :]
return shape
def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input, **kwargs):
reshaped = []
pivots = []
model_set_axis_param = self.model_set_axis # needed to reshape param
for idx, _input in enumerate(inputs):
max_param_shape = ()
if self._n_models > 1 and model_set_axis_input is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (
_input.shape[:model_set_axis_input]
+ _input.shape[model_set_axis_input + 1 :]
)
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(
input_shape,
self._remove_axes_from_shape(param.shape, model_set_axis_param),
)
except IncompatibleShapeError:
raise ValueError(
f"Model input argument {self.inputs[idx]!r} of shape"
f" {input_shape!r} "
f"cannot be broadcast with parameter {param.name!r} of shape "
f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}."
)
if len(param.shape) - 1 > len(max_param_shape):
max_param_shape = self._remove_axes_from_shape(
param.shape, model_set_axis_param
)
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis_input is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model_set_axis_param
else:
pivot = input_ndim - len(max_param_shape)
new_shape = _input.shape[:pivot] + (1,) + _input.shape[pivot:]
new_input = _input.reshape(new_shape)
else:
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = self.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (
_input.shape[: pivot + 1] + new_axes + _input.shape[pivot + 1 :]
)
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis_input, pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if self.n_inputs < self.n_outputs:
pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs))
return reshaped, (pivots,)
def prepare_inputs(
self, *inputs, model_set_axis=None, equivalencies=None, **kwargs
):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
        there is more than one parameter set. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
self._validate_input_shapes(inputs, self.inputs, model_set_axis)
inputs_map = kwargs.get("inputs_map", None)
inputs = self._validate_input_units(inputs, equivalencies, inputs_map)
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if self._n_models == 1:
return self._prepare_inputs_single_model(params, inputs, **kwargs)
else:
return self._prepare_inputs_model_set(
params, inputs, model_set_axis, **kwargs
)
def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None):
inputs = list(inputs)
name = self.name or self.__class__.__name__
# Check that the units are correct, if applicable
if self.input_units is not None:
            # If an inputs_map is provided, that means this is in the context of
            # a compound model and it is necessary to create the appropriate
            # alias for the input coordinate name for the equivalencies dict.
if inputs_map:
edict = {}
for mod, mapping in inputs_map:
if self is mod:
edict[mapping[0]] = equivalencies[mapping[1]]
else:
edict = equivalencies
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
self.inputs, edict, self.input_units_equivalencies
)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(
input_unit, equivalencies=input_units_equivalencies[input_name]
):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is
# because some equivalencies are non-linear, and
# we need to be sure that we evaluate the model in
# its own frame of reference. If input_units_strict
# is set, we also need to convert to the input units.
if (
len(input_units_equivalencies) > 0
or self.input_units_strict[input_name]
):
inputs[i] = inputs[i].to(
input_unit,
equivalencies=input_units_equivalencies[input_name],
)
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
"could not be converted to "
"required dimensionless "
"input"
)
else:
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
" could not be "
"converted to required input"
f" units of {input_unit} ({input_unit.physical_type})"
)
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (
not self.input_units_allow_dimensionless[input_name]
and input_unit is not dimensionless_unscaled
and input_unit is not None
):
if np.any(inputs[i] != 0):
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}',"
" (dimensionless), could not be converted to required "
f"input units of {input_unit} "
f"({input_unit.physical_type})"
)
return inputs
def _process_output_units(self, inputs, outputs):
inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple(
Quantity(out, return_units.get(out_name, None), subok=True)
for out, out_name in zip(outputs, self.outputs)
)
return outputs
@staticmethod
def _prepare_output_single_model(output, broadcast_shape):
if broadcast_shape is not None:
if not broadcast_shape:
return output.item()
else:
try:
return output.reshape(broadcast_shape)
except ValueError:
try:
return output.item()
except ValueError:
return output
return output
def _prepare_outputs_single_model(self, outputs, broadcasted_shapes):
outputs = list(outputs)
for idx, output in enumerate(outputs):
try:
broadcast_shape = check_broadcast(*broadcasted_shapes[0])
except (IndexError, TypeError):
broadcast_shape = broadcasted_shapes[0][idx]
outputs[idx] = self._prepare_output_single_model(output, broadcast_shape)
return tuple(outputs)
def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis):
pivots = broadcasted_shapes[0]
# If model_set_axis = False was passed then use
# self._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = self.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot, model_set_axis)
return tuple(outputs)
def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs):
model_set_axis = kwargs.get("model_set_axis", None)
if len(self) == 1:
return self._prepare_outputs_single_model(outputs, broadcasted_shapes)
else:
return self._prepare_outputs_model_set(
outputs, broadcasted_shapes, model_set_axis
)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return self.copy()
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
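
        Examples
        --------
        An arbitrary model and name, for illustration:

        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D().rename("custom_gaussian")
        >>> g.name
        'custom_gaussian'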
"""
new_model = self.copy()
new_model._name = name
return new_model
def coerce_units(
self,
input_units=None,
return_units=None,
input_units_equivalencies=None,
input_units_allow_dimensionless=False,
):
"""
Attach units to this (unitless) model.
Parameters
----------
input_units : dict or tuple, optional
Input units to attach. If dict, each key is the name of a model input,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.inputs`.
return_units : dict or tuple, optional
Output units to attach. If dict, each key is the name of a model output,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.outputs`.
input_units_equivalencies : dict, optional
Default equivalencies to apply to input values. If set, this should be a
dictionary where each key is a string that corresponds to one of the
model inputs.
input_units_allow_dimensionless : bool or dict, optional
Allow dimensionless input. If this is True, input values to evaluate will
gain the units specified in input_units. If this is a dictionary then it
should map input name to a bool to allow dimensionless numbers for that
input.
Returns
-------
`CompoundModel`
A `CompoundModel` composed of the current model plus
`~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units.
Raises
------
ValueError
If the current model already has units.
Examples
--------
Wrapping a unitless model to require and convert units:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP
<Quantity 1.2 s>
Wrapping a unitless model but still permitting unitless input:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True)
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(10) # doctest: +FLOAT_CMP
<Quantity 21. s>
"""
from .mappings import UnitsMapping
result = self
if input_units is not None:
if self.input_units is not None:
model_units = self.input_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.inputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError(
"Cannot specify input_units for model with existing input units"
)
if isinstance(input_units, dict):
if input_units.keys() != set(self.inputs):
message = (
f"""input_units keys ({", ".join(input_units.keys())}) """
f"""do not match model inputs ({", ".join(self.inputs)})"""
)
raise ValueError(message)
input_units = [input_units[i] for i in self.inputs]
if len(input_units) != self.n_inputs:
message = (
"input_units length does not match n_inputs: "
f"expected {self.n_inputs}, received {len(input_units)}"
)
raise ValueError(message)
mapping = tuple(
(unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units)
)
input_mapping = UnitsMapping(
mapping,
input_units_equivalencies=input_units_equivalencies,
input_units_allow_dimensionless=input_units_allow_dimensionless,
)
input_mapping.inputs = self.inputs
input_mapping.outputs = self.inputs
result = input_mapping | result
if return_units is not None:
if self.return_units is not None:
model_units = self.return_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.outputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError(
"Cannot specify return_units for model "
"with existing output units"
)
if isinstance(return_units, dict):
if return_units.keys() != set(self.outputs):
message = (
f"""return_units keys ({", ".join(return_units.keys())}) """
f"""do not match model outputs ({", ".join(self.outputs)})"""
)
raise ValueError(message)
return_units = [return_units[i] for i in self.outputs]
if len(return_units) != self.n_outputs:
message = (
"return_units length does not match n_outputs: "
f"expected {self.n_outputs}, received {len(return_units)}"
)
raise ValueError(message)
mapping = tuple(
(model_units.get(i), unit)
for i, unit in zip(self.outputs, return_units)
)
return_mapping = UnitsMapping(mapping)
return_mapping.inputs = self.outputs
return_mapping.outputs = self.outputs
result = result | return_mapping
return result
@property
def n_submodels(self):
"""
Return the number of components in a single model, which is
        always 1; `CompoundModel` overrides this property.
"""
return 1
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
for ckey, cvalue in values.items():
param = getattr(self, ckey)
setattr(param, constraint, cvalue)
self._mconstraints = {}
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._mconstraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
n_models = kwargs.pop("n_models", None)
if not (
n_models is None
or (isinstance(n_models, (int, np.integer)) and n_models >= 1)
):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
f"(got {n_models!r})"
)
model_set_axis = kwargs.pop("model_set_axis", None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (
model_set_axis is False
or np.issubdtype(type(model_set_axis), np.integer)
):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
f"model in a set of models (got {model_set_axis!r})."
)
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = set()
if len(args) > len(self.param_names):
raise TypeError(
f"{self.__class__.__name__}.__init__() takes at most "
f"{len(self.param_names)} positional arguments ({len(args)} given)"
)
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
                # A value of None implies using the default value, if one exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
param_name = self.param_names[idx]
params.add(param_name)
if not isinstance(arg, Parameter):
value = quantity_asanyarray(arg, dtype=float)
else:
value = arg
self._initialize_parameter_value(param_name, value)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
f"{self.__class__.__name__}.__init__() got multiple values for"
f" parameter {param_name!r}"
)
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray
# because if any of the arguments are quantities, we need
# to return a Quantity object not a plain Numpy array.
value = quantity_asanyarray(value, dtype=float)
params.add(param_name)
self._initialize_parameter_value(param_name, value)
# Now deal with case where param_name is not supplied by args or kwargs
for param_name in self.param_names:
if param_name not in params:
self._initialize_parameter_value(param_name, None)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
f"{self.__class__.__name__}.__init__() got an unrecognized"
f" parameter {kwarg!r}"
)
# Determine the number of model sets: If the model_set_axis is
# None then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name in self.param_names:
value = getattr(self, name)
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension at least"
f" {min_ndim} for model_set_axis={model_set_axis} (the value"
f" given for {name!r} is only {param_ndim}-dimensional)"
)
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
f"Inconsistent dimensions for parameter {name!r} for"
f" {n_models} model sets. The length of axis"
f" {model_set_axis} must be the same for all input parameter"
" values"
)
self._check_param_broadcast(max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(None)
self._n_models = n_models
# now validate parameters
for name in params:
param = getattr(self, name)
if param._validator is not None:
param._validator(self, param.value)
def _initialize_parameter_value(self, param_name, value):
"""Mostly deals with consistency checks and determining unit issues."""
if isinstance(value, Parameter):
self.__dict__[param_name] = value
return
param = getattr(self, param_name)
# Use default if value is not provided
if value is None:
default = param.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(
f"{self.__class__.__name__}.__init__() requires a value for "
f"parameter {param_name!r}"
)
value = default
unit = param.unit
else:
if isinstance(value, Quantity):
unit = value.unit
value = value.value
else:
unit = None
if unit is None and param.unit is not None:
raise InputParameterError(
f"{self.__class__.__name__}.__init__() requires a Quantity for"
f" parameter {param_name!r}"
)
param._unit = unit
param._set_unit(unit, force=True)
param.internal_unit = None
if param._setter is not None:
if unit is not None:
_val = param._setter(value * unit)
else:
_val = param._setter(value)
if isinstance(_val, Quantity):
param.internal_unit = _val.unit
param._internal_value = np.array(_val.value)
else:
param.internal_unit = None
param._internal_value = np.array(_val)
else:
param._value = np.array(value)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
def _parameters_to_array(self):
# Now set the parameter values (this will also fill
# self._parameters)
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = param.value
if not isinstance(value, np.ndarray):
value = np.array([value])
self._parameters[param_metrics[name]["slice"]] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
def _array_to_parameters(self):
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = self._parameters[param_metrics[name]["slice"]]
value.shape = param_metrics[name]["shape"]
param.value = value
def _check_param_broadcast(self, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
If model_set_axis is None this merely checks that the parameters
broadcast and returns an empty dict if so. This mode is only used for
single model sets.
"""
all_shapes = []
model_set_axis = self._model_set_axis
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
# array with shape (2,) is extended to (2, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (
param_shape[: model_set_axis + 1]
+ new_axes
+ param_shape[model_set_axis + 1 :]
)
self._param_metrics[name]["broadcast_shape"] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = self.param_names[shape_a_idx]
param_b = self.param_names[shape_b_idx]
raise InputParameterError(
f"Parameter {param_a!r} of shape {shape_a!r} cannot be broadcast with "
f"parameter {param_b!r} of shape {shape_b!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules."
)
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
whether or not to return the raw parameter values (i.e. the values that
are actually stored in the ._parameters array, as opposed to the values
        displayed to users). In most cases these are one and the same, but there
are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw and param._setter:
value = param._internal_value
else:
value = param.value
broadcast_shape = self._param_metrics[name].get("broadcast_shape")
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and param.internal_unit is not None:
unit = param.internal_unit
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit, subok=True)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
parts = [repr(a) for a in args]
parts.extend(
f"{name}={param_repr_oneline(getattr(self, name))}"
for name in self.param_names
)
if self.name is not None:
parts.append(f"name={self.name!r}")
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] == value:
continue
parts.append(f"{kwarg}={value!r}")
if len(self) > 1:
parts.append(f"n_models={len(self)}")
return f"<{self.__class__.__name__}({', '.join(parts)})>"
def _format_str(self, keywords=[], defaults={}):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
("Model", self.__class__.__name__),
("Name", self.name),
("Inputs", self.inputs),
("Outputs", self.outputs),
("Model set size", len(self)),
]
parts = [
f"{keyword}: {value}"
for keyword, value in default_keywords
if value is not None
]
for keyword, value in keywords:
if keyword.lower() in defaults and defaults[keyword.lower()] == value:
continue
parts.append(f"{keyword}: {value}")
parts.append("Parameters:")
if len(self) == 1:
columns = [[getattr(self, name).value] for name in self.param_names]
else:
columns = [getattr(self, name).value for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return "\n".join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
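
    For example, a straight-line model might supply derivatives as follows
    (a sketch only; ``Line``, ``slope``, and ``intercept`` are illustrative
    names)::

        import numpy as np
        from astropy.modeling import Fittable1DModel, Parameter

        class Line(Fittable1DModel):
            slope = Parameter(default=1)
            intercept = Parameter(default=0)

            @staticmethod
            def evaluate(x, slope, intercept):
                return slope * x + intercept

            @staticmethod
            def fit_deriv(x, slope, intercept):
                # Partial derivatives with respect to slope and intercept
                return [x, np.ones_like(x)]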
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 1
n_outputs = 1
_separable = True
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 2
n_outputs = 1
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params), f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (
lambda inputs, params: (
f[0](inputs[: f[1]], params) + g[0](inputs[f[1] :], params)
),
f[1] + g[1],
f[2] + g[2],
)
BINARY_OPERATORS = {
"+": _make_arithmetic_operator(operator.add),
"-": _make_arithmetic_operator(operator.sub),
"*": _make_arithmetic_operator(operator.mul),
"/": _make_arithmetic_operator(operator.truediv),
"**": _make_arithmetic_operator(operator.pow),
"|": _composition_operator,
"&": _join_operator,
}
SPECIAL_OPERATORS = _SpecialOperatorsDict()
def _add_special_operator(sop_name, sop):
return SPECIAL_OPERATORS.add(sop_name, sop)
class CompoundModel(Model):
"""
Base class for compound models.
While it can be used directly, the recommended way
to combine models is through the model operators.
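
    Examples
    --------
    A small illustration (the component models are arbitrary):

    >>> from astropy.modeling import CompoundModel
    >>> from astropy.modeling.models import Gaussian1D, Polynomial1D
    >>> compound = Gaussian1D(1, 0, 0.2) + Polynomial1D(degree=1, c0=0.5)
    >>> isinstance(compound, CompoundModel)
    True
    >>> compound.n_submodels
    2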
"""
def __init__(self, op, left, right, name=None):
self.__dict__["_param_names"] = None
self._n_submodels = None
self.op = op
self.left = left
self.right = right
self._bounding_box = None
self._user_bounding_box = None
self._leaflist = None
self._tdict = None
self._parameters = None
self._parameters_ = None
self._param_metrics = None
if op != "fix_inputs" and len(left) != len(right):
raise ValueError("Both operands must have equal values for n_models")
self._n_models = len(left)
if op != "fix_inputs" and (
(left.model_set_axis != right.model_set_axis) or left.model_set_axis
): # not False and not 0
raise ValueError(
"model_set_axis must be False or 0 and consistent for operands"
)
self._model_set_axis = left.model_set_axis
if op in ["+", "-", "*", "/", "**"] or op in SPECIAL_OPERATORS:
if left.n_inputs != right.n_inputs or left.n_outputs != right.n_outputs:
raise ModelDefinitionError(
"Both operands must match numbers of inputs and outputs"
)
self.n_inputs = left.n_inputs
self.n_outputs = left.n_outputs
self.inputs = left.inputs
self.outputs = left.outputs
elif op == "&":
self.n_inputs = left.n_inputs + right.n_inputs
self.n_outputs = left.n_outputs + right.n_outputs
self.inputs = combine_labels(left.inputs, right.inputs)
self.outputs = combine_labels(left.outputs, right.outputs)
elif op == "|":
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
"Unsupported operands for |:"
f" {left.name} (n_inputs={left.n_inputs},"
f" n_outputs={left.n_outputs}) and"
f" {right.name} (n_inputs={right.n_inputs},"
f" n_outputs={right.n_outputs}); n_outputs for the left-hand model"
" must match n_inputs for the right-hand model."
)
self.n_inputs = left.n_inputs
self.n_outputs = right.n_outputs
self.inputs = left.inputs
self.outputs = right.outputs
elif op == "fix_inputs":
if not isinstance(left, Model):
raise ValueError(
'First argument to "fix_inputs" must be an instance of '
"an astropy Model."
)
if not isinstance(right, dict):
raise ValueError(
'Expected a dictionary for second argument of "fix_inputs".'
)
# Dict keys must match either possible indices
# for model on left side, or names for inputs.
self.n_inputs = left.n_inputs - len(right)
# Assign directly to the private attribute (instead of using the setter)
# to avoid asserting the new number of outputs matches the old one.
self._outputs = left.outputs
self.n_outputs = left.n_outputs
newinputs = list(left.inputs)
keys = right.keys()
input_ind = []
for key in keys:
if np.issubdtype(type(key), np.integer):
if key >= left.n_inputs or key < 0:
raise ValueError(
"Substitution key integer value "
"not among possible input choices."
)
if key in input_ind:
raise ValueError(
"Duplicate specification of same input (index/name)."
)
input_ind.append(key)
elif isinstance(key, str):
if key not in left.inputs:
raise ValueError(
"Substitution key string not among possible input choices."
)
# Check to see it doesn't match positional
# specification.
ind = left.inputs.index(key)
if ind in input_ind:
raise ValueError(
"Duplicate specification of same input (index/name)."
)
input_ind.append(ind)
# Remove substituted inputs
input_ind.sort()
input_ind.reverse()
for ind in input_ind:
del newinputs[ind]
self.inputs = tuple(newinputs)
# Now check to see if the input model has bounding_box defined.
# If so, remove the appropriate dimensions and set it for this
# instance.
try:
self.bounding_box = self.left.bounding_box.fix_inputs(self, right)
except NotImplementedError:
pass
else:
raise ModelDefinitionError("Illegal operator: ", self.op)
self.name = name
self._fittable = None
self.fit_deriv = None
self.col_fit_deriv = None
if op in ("|", "+", "-"):
self.linear = left.linear and right.linear
else:
self.linear = False
self.eqcons = []
self.ineqcons = []
self.n_left_params = len(self.left.parameters)
self._map_parameters()
def _get_left_inputs_from_args(self, args):
return args[: self.left.n_inputs]
def _get_right_inputs_from_args(self, args):
op = self.op
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs : self.left.n_inputs + self.right.n_inputs]
elif op == "|" or op == "fix_inputs":
return None
else:
return args[: self.left.n_inputs]
def _get_left_params_from_args(self, args):
op = self.op
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
n_inputs = self.left.n_inputs + self.right.n_inputs
return args[n_inputs : n_inputs + self.n_left_params]
else:
return args[self.left.n_inputs : self.left.n_inputs + self.n_left_params]
def _get_right_params_from_args(self, args):
op = self.op
if op == "fix_inputs":
return None
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params :]
else:
return args[self.left.n_inputs + self.n_left_params :]
def _get_kwarg_model_parameters_as_positional(self, args, kwargs):
        # Could do it with inserts, but rebuilding seems like the simplest way.
# TODO: Check if any param names are in kwargs maybe as an intersection of sets?
if self.op == "&":
new_args = list(args[: self.left.n_inputs + self.right.n_inputs])
args_pos = self.left.n_inputs + self.right.n_inputs
else:
new_args = list(args[: self.left.n_inputs])
args_pos = self.left.n_inputs
for param_name in self.param_names:
kw_value = kwargs.pop(param_name, None)
if kw_value is not None:
value = kw_value
else:
try:
value = args[args_pos]
except IndexError:
raise IndexError("Missing parameter or input")
args_pos += 1
new_args.append(value)
return new_args, kwargs
def _apply_operators_to_value_lists(self, leftval, rightval, **kw):
op = self.op
if op == "+":
return binary_operation(operator.add, leftval, rightval)
elif op == "-":
return binary_operation(operator.sub, leftval, rightval)
elif op == "*":
return binary_operation(operator.mul, leftval, rightval)
elif op == "/":
return binary_operation(operator.truediv, leftval, rightval)
elif op == "**":
return binary_operation(operator.pow, leftval, rightval)
elif op == "&":
if not isinstance(leftval, tuple):
leftval = (leftval,)
if not isinstance(rightval, tuple):
rightval = (rightval,)
return leftval + rightval
elif op in SPECIAL_OPERATORS:
return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval)
else:
raise ModelDefinitionError("Unrecognized operator {op}")
def evaluate(self, *args, **kw):
op = self.op
args, kw = self._get_kwarg_model_parameters_as_positional(args, kw)
left_inputs = self._get_left_inputs_from_args(args)
left_params = self._get_left_params_from_args(args)
if op == "fix_inputs":
pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs)))
fixed_inputs = {
key if np.issubdtype(type(key), np.integer) else pos_index[key]: value
for key, value in self.right.items()
}
left_inputs = [
fixed_inputs[ind] if ind in fixed_inputs.keys() else inp
for ind, inp in enumerate(left_inputs)
]
leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params))
if op == "fix_inputs":
return leftval
right_inputs = self._get_right_inputs_from_args(args)
right_params = self._get_right_params_from_args(args)
if op == "|":
if isinstance(leftval, tuple):
return self.right.evaluate(*itertools.chain(leftval, right_params))
else:
return self.right.evaluate(leftval, *right_params)
else:
rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params))
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
@property
def n_submodels(self):
if self._leaflist is None:
self._make_leaflist()
return len(self._leaflist)
@property
def submodel_names(self):
"""Return the names of submodels in a ``CompoundModel``."""
if self._leaflist is None:
self._make_leaflist()
names = [item.name for item in self._leaflist]
nonecount = 0
newnames = []
for item in names:
if item is None:
newnames.append(f"None_{nonecount}")
nonecount += 1
else:
newnames.append(item)
return tuple(newnames)
def both_inverses_exist(self):
"""
        Return True if both members of this compound model have inverses.
"""
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"CompoundModel.both_inverses_exist is deprecated. Use has_inverse instead.",
AstropyDeprecationWarning,
)
try:
self.left.inverse
self.right.inverse
except NotImplementedError:
return False
return True
def _pre_evaluate(self, *args, **kwargs):
"""
CompoundModel specific input setup that needs to occur prior to
model evaluation.
Note
----
All of the _pre_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
        # If equivalencies are provided, it is necessary to build an input-name
        # mapping and pass it as a keyword input for use by model evaluation so
        # that the compound model input names can be matched to the component
        # model input names.
if "equivalencies" in kwargs:
# Restructure to be useful for the individual model lookup
kwargs["inputs_map"] = [
(value[0], (value[1], key)) for key, value in self.inputs_map().items()
]
# Setup actual model evaluation method
def evaluate(_inputs):
return self._evaluate(*_inputs, **kwargs)
return evaluate, args, None, kwargs
@property
def _argnames(self):
"""
No inputs should be used to determine input_shape when handling compound models
"""
return ()
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
CompoundModel specific post evaluation processing of outputs
Note
----
All of the _post_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1:
return outputs[0]
return outputs
def _evaluate(self, *args, **kw):
op = self.op
if op != "fix_inputs":
if op != "&":
leftval = self.left(*args, **kw)
if op != "|":
rightval = self.right(*args, **kw)
else:
rightval = None
else:
leftval = self.left(*(args[: self.left.n_inputs]), **kw)
rightval = self.right(*(args[self.left.n_inputs :]), **kw)
if op != "|":
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
elif op == "|":
if isinstance(leftval, tuple):
return self.right(*leftval, **kw)
else:
return self.right(leftval, **kw)
else:
subs = self.right
newargs = list(args)
subinds = []
subvals = []
for key in subs.keys():
if np.issubdtype(type(key), np.integer):
subinds.append(key)
elif isinstance(key, str):
ind = self.left.inputs.index(key)
subinds.append(ind)
subvals.append(subs[key])
# Turn inputs specified in kw into positional indices.
# Names for compound inputs do not propagate to sub models.
kwind = []
kwval = []
for kwkey in list(kw.keys()):
if kwkey in self.inputs:
ind = self.inputs.index(kwkey)
if ind < len(args):
raise ValueError(
"Keyword argument duplicates positional value supplied."
)
kwind.append(ind)
kwval.append(kw[kwkey])
del kw[kwkey]
# Build new argument list
# Append keyword specified args first
if kwind:
kwargs = list(zip(kwind, kwval))
kwargs.sort()
kwindsorted, kwvalsorted = list(zip(*kwargs))
newargs = newargs + list(kwvalsorted)
if subinds:
subargs = list(zip(subinds, subvals))
subargs.sort()
# subindsorted, subvalsorted = list(zip(*subargs))
# The substitutions must be inserted in order
for ind, val in subargs:
newargs.insert(ind, val)
return self.left(*newargs, **kw)
@property
def param_names(self):
"""An ordered list of parameter names."""
return self._param_names
def _make_leaflist(self):
tdict = {}
leaflist = []
make_subtree_dict(self, "", tdict, leaflist)
self._leaflist = leaflist
self._tdict = tdict
def __getattr__(self, name):
"""
If someone accesses an attribute not already defined, map the
parameters, and then see if the requested attribute is one of
the parameters
"""
# The following test is needed to avoid infinite recursion
# caused by deepcopy. There may be other such cases discovered.
if name == "__setstate__":
raise AttributeError
if name in self._param_names:
return self.__dict__[name]
else:
raise AttributeError(f'Attribute "{name}" not found')
def __getitem__(self, index):
if self._leaflist is None:
self._make_leaflist()
leaflist = self._leaflist
tdict = self._tdict
if isinstance(index, slice):
if index.step:
raise ValueError("Steps in slices not supported for compound models")
if index.start is not None:
if isinstance(index.start, str):
start = self._str_index_to_int(index.start)
else:
start = index.start
else:
start = 0
if index.stop is not None:
if isinstance(index.stop, str):
stop = self._str_index_to_int(index.stop)
else:
stop = index.stop - 1
else:
stop = len(leaflist) - 1
if index.stop == 0:
raise ValueError("Slice endpoint cannot be 0")
if start < 0:
start = len(leaflist) + start
if stop < 0:
stop = len(leaflist) + stop
# now search for matching node:
if stop == start: # only single value, get leaf instead in code below
index = start
else:
for key in tdict:
node, leftind, rightind = tdict[key]
if leftind == start and rightind == stop:
return node
raise IndexError("No appropriate subtree matches slice")
if np.issubdtype(type(index), np.integer):
return leaflist[index]
elif isinstance(index, str):
return leaflist[self._str_index_to_int(index)]
else:
raise TypeError("index must be an integer, slice, or model name string")
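# A brief indexing sketch, assuming ``Shift`` and ``Scale`` from
# astropy.modeling.models (the submodel name "scale" is hypothetical):
#
#     from astropy.modeling.models import Scale, Shift
#     m = Shift(1) | Scale(2, name="scale") | Shift(3)
#     m[0]         # the first Shift instance
#     m["scale"]   # the Scale instance, looked up by its name
#     m[0:2]       # the compound subtree Shift(1) | Scale(2, name="scale")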
def _str_index_to_int(self, str_index):
# Search through leaflist for item with that name
found = []
for nleaf, leaf in enumerate(self._leaflist):
if getattr(leaf, "name", None) == str_index:
found.append(nleaf)
if len(found) == 0:
raise IndexError(f"No component with name '{str_index}' found")
if len(found) > 1:
raise IndexError(
f"Multiple components found using '{str_index}' as name\n"
f"at indices {found}"
)
return found[0]
@property
def n_inputs(self):
"""The number of inputs of a model."""
return self._n_inputs
@n_inputs.setter
def n_inputs(self, value):
self._n_inputs = value
@property
def n_outputs(self):
"""The number of outputs of a model."""
return self._n_outputs
@n_outputs.setter
def n_outputs(self, value):
self._n_outputs = value
@property
def eqcons(self):
return self._eqcons
@eqcons.setter
def eqcons(self, value):
self._eqcons = value
@property
def ineqcons(self):
return self._ineqcons
@ineqcons.setter
def ineqcons(self, value):
self._ineqcons = value
def traverse_postorder(self, include_operator=False):
"""Postorder traversal of the CompoundModel tree."""
res = []
if isinstance(self.left, CompoundModel):
res = res + self.left.traverse_postorder(include_operator)
else:
res = res + [self.left]
if isinstance(self.right, CompoundModel):
res = res + self.right.traverse_postorder(include_operator)
else:
res = res + [self.right]
if include_operator:
res.append(self.op)
else:
res.append(self)
return res
def _format_expression(self, format_leaf=None):
leaf_idx = 0
operands = deque()
if format_leaf is None:
format_leaf = lambda i, l: f"[{i}]" # noqa: E731
for node in self.traverse_postorder():
if not isinstance(node, CompoundModel):
operands.append(format_leaf(leaf_idx, node))
leaf_idx += 1
continue
right = operands.pop()
left = operands.pop()
if node.op in OPERATOR_PRECEDENCE:
oper_order = OPERATOR_PRECEDENCE[node.op]
if isinstance(node, CompoundModel):
if (
isinstance(node.left, CompoundModel)
and OPERATOR_PRECEDENCE[node.left.op] < oper_order
):
left = f"({left})"
if (
isinstance(node.right, CompoundModel)
and OPERATOR_PRECEDENCE[node.right.op] < oper_order
):
right = f"({right})"
operands.append(" ".join((left, node.op, right)))
else:
left = f"(({left}),"
right = f"({right}))"
operands.append(" ".join((node.op[0], left, right)))
return "".join(operands)
def _format_components(self):
if self._parameters_ is None:
self._map_parameters()
return "\n\n".join(f"[{idx}]: {m!r}" for idx, m in enumerate(self._leaflist))
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
("Expression", expression),
("Components", "\n" + indent(components)),
]
return super()._format_str(keywords=keywords)
def rename(self, name):
self.name = name
return self
@property
def isleaf(self):
return False
@property
def inverse(self):
if self.op == "|":
return self.right.inverse | self.left.inverse
elif self.op == "&":
return self.left.inverse & self.right.inverse
else:
return NotImplemented
@property
def fittable(self):
"""Set the fittable attribute on a compound model."""
if self._fittable is None:
if self._leaflist is None:
self._map_parameters()
self._fittable = all(m.fittable for m in self._leaflist)
return self._fittable
__add__ = _model_oper("+")
__sub__ = _model_oper("-")
__mul__ = _model_oper("*")
__truediv__ = _model_oper("/")
__pow__ = _model_oper("**")
__or__ = _model_oper("|")
__and__ = _model_oper("&")
def _map_parameters(self):
"""
Map all the constituent model parameters to the compound object,
renaming as necessary by appending a suffix number.
This can be an expensive operation, particularly for a complex
expression tree.
All the corresponding parameter attributes are created that one
expects for the Model class.
The parameter objects that the attributes point to are the same
objects as in the constituent models. Changes made to parameter
values in either are seen by both.
Prior to calling this, none of the associated attributes will
exist. This method must be called to make the model usable by
fitting engines.
"""
if self._parameters is not None:
# do nothing
return
if self._leaflist is None:
self._make_leaflist()
self._parameters_ = {}
param_map = {}
self._param_names = []
for lindex, leaf in enumerate(self._leaflist):
if not isinstance(leaf, dict):
for param_name in leaf.param_names:
param = getattr(leaf, param_name)
new_param_name = f"{param_name}_{lindex}"
self.__dict__[new_param_name] = param
self._parameters_[new_param_name] = param
self._param_names.append(new_param_name)
param_map[new_param_name] = (lindex, param_name)
self._param_metrics = {}
self._param_map = param_map
self._param_map_inverse = {v: k for k, v in param_map.items()}
self._initialize_slices()
self._param_names = tuple(self._param_names)
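# A naming sketch, assuming ``Gaussian1D`` and ``Const1D`` from
# astropy.modeling.models:
#
#     from astropy.modeling.models import Const1D, Gaussian1D
#     m = Gaussian1D() + Const1D()
#     m.param_names   # typically ('amplitude_0', 'mean_0', 'stddev_0', 'amplitude_1')
#     m.amplitude_0 is m[0].amplitude   # True: same Parameter object as the leaf's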
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name] = {}
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
@staticmethod
def _recursive_lookup(branch, adict, key):
if isinstance(branch, CompoundModel):
return adict[key]
return branch, key
def inputs_map(self):
"""
Map the names of the inputs to this ExpressionTree to the inputs of the leaf models.
"""
inputs_map = {}
if not isinstance(
self.op, str
): # If we don't have an operator the mapping is trivial
return {inp: (self, inp) for inp in self.inputs}
elif self.op == "|":
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
elif self.op == "&":
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
if isinstance(self.right, CompoundModel):
r_inputs_map = self.right.inputs_map()
for i, inp in enumerate(self.inputs):
if i < len(self.left.inputs): # Get from left
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[self.left.inputs[i]]
else:
inputs_map[inp] = self.left, self.left.inputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
inputs_map[inp] = r_inputs_map[
self.right.inputs[i - len(self.left.inputs)]
]
else:
inputs_map[inp] = (
self.right,
self.right.inputs[i - len(self.left.inputs)],
)
elif self.op == "fix_inputs":
fixed_ind = list(self.right.keys())
ind = [
list(self.left.inputs).index(i) if isinstance(i, str) else i
for i in fixed_ind
]
inp_ind = list(range(self.left.n_inputs))
for i in ind:
inp_ind.remove(i)
for i in inp_ind:
inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i]
else:
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.left.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
return inputs_map
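# A minimal sketch of the resulting mapping, assuming ``Shift``, ``Scale`` and
# ``Rotation2D`` from astropy.modeling.models:
#
#     from astropy.modeling.models import Rotation2D, Scale, Shift
#     m = Shift(1) & Scale(2) | Rotation2D(30)
#     m.inputs_map()   # roughly {'x0': (Shift(1), 'x'), 'x1': (Scale(2), 'x')}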
def _parameter_units_for_data_units(self, input_units, output_units):
if self._leaflist is None:
self._map_parameters()
units_for_data = {}
for imodel, model in enumerate(self._leaflist):
units_for_data_leaf = model._parameter_units_for_data_units(
input_units, output_units
)
for param_leaf in units_for_data_leaf:
param = self._param_map_inverse[(imodel, param_leaf)]
units_for_data[param] = units_for_data_leaf[param_leaf]
return units_for_data
@property
def input_units(self):
inputs_map = self.inputs_map()
input_units_dict = {
key: inputs_map[key][0].input_units[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units is not None
}
if input_units_dict:
return input_units_dict
return None
@property
def input_units_equivalencies(self):
inputs_map = self.inputs_map()
input_units_equivalencies_dict = {
key: inputs_map[key][0].input_units_equivalencies[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units_equivalencies is not None
}
if not input_units_equivalencies_dict:
return None
return input_units_equivalencies_dict
@property
def input_units_allow_dimensionless(self):
inputs_map = self.inputs_map()
return {
key: inputs_map[key][0].input_units_allow_dimensionless[orig_key]
for key, (mod, orig_key) in inputs_map.items()
}
@property
def input_units_strict(self):
inputs_map = self.inputs_map()
return {
key: inputs_map[key][0].input_units_strict[orig_key]
for key, (mod, orig_key) in inputs_map.items()
}
@property
def return_units(self):
outputs_map = self.outputs_map()
return {
key: outputs_map[key][0].return_units[orig_key]
for key, (mod, orig_key) in outputs_map.items()
if outputs_map[key][0].return_units is not None
}
def outputs_map(self):
"""
Map the names of the outputs to this ExpressionTree to the outputs of the leaf models.
"""
outputs_map = {}
if not isinstance(
self.op, str
): # If we don't have an operator the mapping is trivial
return {out: (self, out) for out in self.outputs}
elif self.op == "|":
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for out in self.outputs:
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[out]
else:
outputs_map[out] = self.right, out
elif self.op == "&":
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for i, out in enumerate(self.outputs):
if i < len(self.left.outputs): # Get from left
if isinstance(self.left, CompoundModel):
outputs_map[out] = l_outputs_map[self.left.outputs[i]]
else:
outputs_map[out] = self.left, self.left.outputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[
self.right.outputs[i - len(self.left.outputs)]
]
else:
outputs_map[out] = (
self.right,
self.right.outputs[i - len(self.left.outputs)],
)
elif self.op == "fix_inputs":
return self.left.outputs_map()
else:
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
for out in self.left.outputs:
if isinstance(self.left, CompoundModel):
outputs_map[out] = l_outputs_map[out]
else:
outputs_map[out] = self.left, out
return outputs_map
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``out`` or ``coords`` must be
passed.
Raises
------
ValueError
If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = self.get_bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or out must be input.")
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError("inconsistent shape of the output.")
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError(
"the array and model must have the same number of dimensions."
)
if bbox is not None:
# Assures position is at center pixel, important when using
# add_array.
pd = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array(
[extract_array(c, sub_shape, pos) for c in coords]
)
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input out in "
"one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
def replace_submodel(self, name, model):
"""
Construct a new `~astropy.modeling.CompoundModel` instance from an
existing CompoundModel, replacing the named submodel with a new model.
In order to ensure that inverses and names are kept/reconstructed, it's
necessary to rebuild the CompoundModel from the replaced node all the
way back to the base. The original CompoundModel is left untouched.
Parameters
----------
name : str
name of submodel to be replaced
model : `~astropy.modeling.Model`
replacement model
"""
submodels = [
m for m in self.traverse_postorder() if getattr(m, "name", None) == name
]
if submodels:
if len(submodels) > 1:
raise ValueError(f"More than one submodel named {name}")
old_model = submodels.pop()
if len(old_model) != len(model):
raise ValueError(
"New and old models must have equal values for n_models"
)
# Do this check first in order to raise a more helpful Exception,
# although it would fail trying to construct the new CompoundModel
if (
old_model.n_inputs != model.n_inputs
or old_model.n_outputs != model.n_outputs
):
raise ValueError(
"New model must match numbers of inputs and "
"outputs of existing model"
)
tree = _get_submodel_path(self, name)
while tree:
branch = self.copy()
for node in tree[:-1]:
branch = getattr(branch, node)
setattr(branch, tree[-1], model)
model = CompoundModel(
branch.op, branch.left, branch.right, name=branch.name
)
tree = tree[:-1]
return model
else:
raise ValueError(f"No submodels found named {name}")
def _set_sub_models_and_parameter_units(self, left, right):
"""
Provides a work-around to properly set the sub-models and their respective
parameters' units/values when using the ``without_units_for_data``
or ``with_units_from_data`` methods.
"""
model = CompoundModel(self.op, left, right)
self.left = left
self.right = right
for name in model.param_names:
model_parameter = getattr(model, name)
parameter = getattr(self, name)
parameter.value = model_parameter.value
parameter._set_unit(model_parameter.unit, force=True)
def without_units_for_data(self, **kwargs):
"""
See `~astropy.modeling.Model.without_units_for_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
units. This is only valid for compound models using the ``*`` and ``/``
operators, as in that case it is reasonable to mix the output units. It does this
by modifying the output units of each sub model by using the output
units of the other sub model so that we can apply the original function
and get the desired result.
Additional data has to be output in the mixed output unit case
so that the units can be properly rebuilt by
`~astropy.modeling.CompoundModel.with_units_from_data`.
Outside of the mixed output units case, this method is identical to the
base method.
"""
if self.op in ["*", "/"]:
model = self.copy()
inputs = {inp: kwargs[inp] for inp in self.inputs}
left_units = self.left.output_units(**kwargs)
right_units = self.right.output_units(**kwargs)
if self.op == "*":
left_kwargs = {
out: kwargs[out] / right_units[out]
for out in self.left.outputs
if kwargs[out] is not None
}
right_kwargs = {
out: kwargs[out] / left_units[out]
for out in self.right.outputs
if kwargs[out] is not None
}
else:
left_kwargs = {
out: kwargs[out] * right_units[out]
for out in self.left.outputs
if kwargs[out] is not None
}
right_kwargs = {
out: 1 / kwargs[out] * left_units[out]
for out in self.right.outputs
if kwargs[out] is not None
}
left_kwargs.update(inputs.copy())
right_kwargs.update(inputs.copy())
left = self.left.without_units_for_data(**left_kwargs)
if isinstance(left, tuple):
left_kwargs["_left_kwargs"] = left[1]
left_kwargs["_right_kwargs"] = left[2]
left = left[0]
right = self.right.without_units_for_data(**right_kwargs)
if isinstance(right, tuple):
right_kwargs["_left_kwargs"] = right[1]
right_kwargs["_right_kwargs"] = right[2]
right = right[0]
model._set_sub_models_and_parameter_units(left, right)
return model, left_kwargs, right_kwargs
else:
return super().without_units_for_data(**kwargs)
def with_units_from_data(self, **kwargs):
"""
See `~astropy.modeling.Model.with_units_from_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
units. This is only valid for compound models using the ``*`` and ``/``
operators, as in that case it is reasonable to mix the output units. In order to
do this it requires some additional information output by
`~astropy.modeling.CompoundModel.without_units_for_data` passed as
keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``.
Outside of the mixed output units case, this method is identical to the
base method.
"""
if self.op in ["*", "/"]:
left_kwargs = kwargs.pop("_left_kwargs")
right_kwargs = kwargs.pop("_right_kwargs")
left = self.left.with_units_from_data(**left_kwargs)
right = self.right.with_units_from_data(**right_kwargs)
model = self.copy()
model._set_sub_models_and_parameter_units(left, right)
return model
else:
return super().with_units_from_data(**kwargs)
def _get_submodel_path(model, name):
"""Find the route down a CompoundModel's tree to the model with the
specified name (whether it's a leaf or not)"""
if getattr(model, "name", None) == name:
return []
try:
return ["left"] + _get_submodel_path(model.left, name)
except (AttributeError, TypeError):
pass
try:
return ["right"] + _get_submodel_path(model.right, name)
except (AttributeError, TypeError):
pass
def binary_operation(binoperator, left, right):
"""
Perform binary operation. Operands may be matching tuples of operands.
"""
if isinstance(left, tuple) and isinstance(right, tuple):
return tuple(binoperator(item[0], item[1]) for item in zip(left, right))
return binoperator(left, right)
def get_ops(tree, opset):
"""
Recursive function to collect operators used.
"""
if isinstance(tree, CompoundModel):
opset.add(tree.op)
get_ops(tree.left, opset)
get_ops(tree.right, opset)
else:
return
def make_subtree_dict(tree, nodepath, tdict, leaflist):
"""
Traverse a tree noting each node by a key that indicates all the
left/right choices necessary to reach that node. Each key will
reference a tuple that contains:
- reference to the compound model for that node.
- left most index contained within that subtree
(relative to all indices for the whole tree)
- right most index contained within that subtree
"""
# if this is a leaf, just append it to the leaflist
if not hasattr(tree, "isleaf"):
leaflist.append(tree)
else:
leftmostind = len(leaflist)
make_subtree_dict(tree.left, nodepath + "l", tdict, leaflist)
make_subtree_dict(tree.right, nodepath + "r", tdict, leaflist)
rightmostind = len(leaflist) - 1
tdict[nodepath] = (tree, leftmostind, rightmostind)
_ORDER_OF_OPERATORS = [("fix_inputs",), ("|",), ("&",), ("+", "-"), ("*", "/"), ("**",)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None):
"""
This function creates a compound model with one or more of the input
values of the input model assigned fixed values (scalar or array).
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that one or more of the
model input values will be fixed to some constant value.
values : dict
A dictionary where the key identifies which input to fix
and its value is the value to fix it at. The key may either be the
name of the input or a number reflecting its order in the inputs.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> g = Gaussian2D(1, 2, 3, 4, 5)
>>> gv = fix_inputs(g, {0: 2.5})
Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y)
"""
model = CompoundModel("fix_inputs", modelinstance, values)
if bounding_boxes is not None:
if selector_args is None:
selector_args = tuple((key, True) for key in values.keys())
bbox = CompoundBoundingBox.validate(
modelinstance, bounding_boxes, selector_args
)
_selector = bbox.selector_args.get_fixed_values(modelinstance, values)
new_bbox = bbox[_selector]
new_bbox = new_bbox.__class__.validate(model, new_bbox)
model.bounding_box = new_bbox
return model
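# A complementary sketch to the docstring example above: inputs may also be
# fixed by name rather than by position (``Gaussian2D`` as in the docstring).
#
#     from astropy.modeling.models import Gaussian2D
#     g = Gaussian2D(1, 2, 3, 4, 5)
#     gv = fix_inputs(g, {"x": 2.5})
#     gv(3.0)   # evaluates g(x=2.5, y=3.0)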
def bind_bounding_box(modelinstance, bounding_box, ignored=None, order="C"):
"""
Set a validated bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated bounding box will be set on.
bounding_box : tuple
A bounding box tuple, see :ref:`astropy:bounding-boxes` for details
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = ModelBoundingBox.validate(
modelinstance, bounding_box, ignored=ignored, order=order
)
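# A minimal usage sketch, assuming ``Gaussian1D`` from astropy.modeling.models:
#
#     from astropy.modeling.models import Gaussian1D
#     g = Gaussian1D(amplitude=1, mean=0, stddev=1)
#     bind_bounding_box(g, (-5, 5))
#     g.bounding_box   # a validated ModelBoundingBox restricting "x" to [-5, 5]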
def bind_compound_bounding_box(
modelinstance,
bounding_boxes,
selector_args,
create_selector=None,
ignored=None,
order="C",
):
"""
Add a validated compound bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated compound bounding box will be set on.
bounding_boxes : dict
A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`
for details.
selector_args : list
List of selector argument tuples to define selection for compound
bounding box, see :ref:`astropy:bounding-boxes` for details.
create_selector : callable, optional
An optional callable with interface (selector_value, model) which
can generate a bounding box based on a selector value and model if
there is no bounding box in the compound bounding box listed under
that selector value. Default is ``None``, meaning new bounding
box entries will not be automatically generated.
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = CompoundBoundingBox.validate(
modelinstance,
bounding_boxes,
selector_args,
create_selector=create_selector,
ignored=ignored,
order=order,
)
def custom_model(*args, fit_deriv=None):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
The model is separable only if there is a single input.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
The standard settable model properties can be configured by default
using keyword arguments matching the name of the property; however,
these values are not set as model "parameters". Moreover, users
cannot use keyword arguments matching non-settable model properties,
with the exception of ``n_outputs`` which should be set to the number of
outputs of your function.
Parameters
----------
func : function
Function which defines the model. It should take N positional
arguments, where ``N`` is the number of dimensions of the model (the number of
independent variables in the model), and any number of keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25)
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
f"{__name__} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any)."
)
def _custom_model_inputs(func):
"""
Processes the inputs to the `custom_model`'s function into the appropriate
categories.
Parameters
----------
func : callable
Returns
-------
inputs : list
list of evaluation inputs
special_params : dict
dictionary of model properties which require special treatment
settable_params : dict
dictionary of defaults for settable model properties
params : dict
dictionary of model parameters set by `custom_model`'s function
"""
inputs, parameters = get_inputs_and_params(func)
special = ["n_outputs"]
settable = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is not None
]
properties = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is None and attr not in special
]
special_params = {}
settable_params = {}
params = {}
for param in parameters:
if param.name in special:
special_params[param.name] = param.default
elif param.name in settable:
settable_params[param.name] = param.default
elif param.name in properties:
raise ValueError(
f"Parameter '{param.name}' cannot be a model property: {properties}."
)
else:
params[param.name] = param.default
return inputs, special_params, settable_params, params
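# A short illustration of the categories above, for a hypothetical function:
#
#     def f(x, slope=1.0, n_outputs=1, name=None):
#         ...
#
# ``x`` is the single evaluation input, ``n_outputs`` goes to ``special_params``,
# ``name`` (a settable Model property) goes to ``settable_params`` and ``slope``
# becomes a model parameter in ``params``.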
def _custom_model_wrapper(func, fit_deriv=None):
"""
Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable object"
)
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other callable object"
)
model_name = func.__name__
inputs, special_params, settable_params, params = _custom_model_inputs(func)
if fit_deriv is not None and len(fit_deriv.__defaults__) != len(params):
raise ModelDefinitionError(
"derivative function should accept same number of parameters as func."
)
params = {
param: Parameter(param, default=default) for param, default in params.items()
}
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
members = {
"__module__": str(modname),
"__doc__": func.__doc__,
"n_inputs": len(inputs),
"n_outputs": special_params.pop("n_outputs", 1),
"evaluate": staticmethod(func),
"_settable_properties": settable_params,
}
if fit_deriv is not None:
members["fit_deriv"] = staticmethod(fit_deriv)
members.update(params)
cls = type(model_name, (FittableModel,), members)
cls._separable = len(inputs) == 1
return cls
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``model(*coords)`` has the same shape as ``arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from
``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = model.bounding_box
if (coords is None) and (arr is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or arr must be input.")
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError(
"number of array dimensions inconsistent with number of model inputs."
)
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError(
"coordinate length inconsistent with the number of model inputs."
)
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError("coordinate shape inconsistent with the array shape.")
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = pos, delta = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input"
" arr in one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
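# A minimal usage sketch, assuming ``Gaussian2D`` from astropy.modeling.models:
#
#     import numpy as np
#     from astropy.modeling.models import Gaussian2D
#     psf = Gaussian2D(1, 5, 5, 1, 1)
#     psf.bounding_box = ((2, 8), (2, 8))                 # (y, x) intervals
#     image = render_model(psf, arr=np.zeros((10, 10)))   # PSF added inside the box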
def hide_inverse(model):
"""
This is a convenience function intended to disable automatic generation
of the inverse in compound models by disabling one of the constituent
model's inverse. This is to handle cases where user provided inverse
functions are not compatible within an expression.
Example:
compound_model.inverse = hide_inverse(m1) + m2 + m3
This will ensure that the defined inverse itself won't attempt to
build its own inverse, which would otherwise fail in this example
(e.g., m = m1 + m2 + m3 happens to raise an exception for this
reason).
Note that this permanently disables it. To prevent that either copy
the model or restore the inverse later.
"""
del model.inverse
return model
99009f355f2d57b5c007362ca5aa6b13230d8f96f544d1b428c0c67a0ebb43bf
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sundry function and class decorators."""
import functools
import inspect
import textwrap
import threading
import types
import warnings
from inspect import signature
from .exceptions import (
AstropyDeprecationWarning,
AstropyPendingDeprecationWarning,
AstropyUserWarning,
)
__all__ = [
"classproperty",
"deprecated",
"deprecated_attribute",
"deprecated_renamed_argument",
"format_doc",
"lazyproperty",
"sharedmethod",
]
_NotFound = object()
def deprecated(
since,
message="",
name="",
alternative="",
pending=False,
obj_type=None,
warning_type=AstropyDeprecationWarning,
):
"""
Used to mark a function or class as deprecated.
To mark an attribute as deprecated, use `deprecated_attribute`.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``func`` may be used for the name of the function,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. ``obj_type`` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function or class; if not provided
the name is automatically determined from the passed in
function or class, though this is useful in the case of
renamed functions, where the new function is just assigned to
the name of the deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object. The deprecation warning will
tell the user about this alternative if provided.
pending : bool, optional
If True, uses an AstropyPendingDeprecationWarning instead of a
``warning_type``.
obj_type : str, optional
The type of this object, if the automatically determined one
needs to be overridden.
warning_type : Warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
"""
method_types = (classmethod, staticmethod, types.MethodType)
def deprecate_doc(old_doc, message):
"""
Returns a given docstring with a deprecation message prepended
to it.
"""
if not old_doc:
old_doc = ""
old_doc = textwrap.dedent(old_doc).strip("\n")
new_doc = f"\n.. deprecated:: {since}\n {message.strip()}\n\n" + old_doc
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r"\ "
return new_doc
def get_function(func):
"""
Given a function or classmethod (or other function wrapper type), get
the function object.
"""
if isinstance(func, method_types):
func = func.__func__
return func
def deprecate_function(func, message, warning_type=warning_type):
"""
Returns a wrapped function that displays ``warning_type``
when it is called.
"""
if isinstance(func, method_types):
func_wrapper = type(func)
else:
func_wrapper = lambda f: f # noqa: E731
func = get_function(func)
def deprecated_func(*args, **kwargs):
if pending:
category = AstropyPendingDeprecationWarning
else:
category = warning_type
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
# If this is an extension function, we can't call
# functools.wraps on it, but we normally don't care.
# This crazy way to get the type of a wrapper descriptor is
# straight out of the Python 3.3 inspect module docs.
if type(func) is not type(str.__dict__["__add__"]): # noqa: E721
deprecated_func = functools.wraps(func)(deprecated_func)
deprecated_func.__doc__ = deprecate_doc(deprecated_func.__doc__, message)
return func_wrapper(deprecated_func)
def deprecate_class(cls, message, warning_type=warning_type):
"""
Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
if the class or any of the bases overrides ``__new__``) so it will give
a deprecation warning when an instance is created.
This won't work for extension classes because these can't be modified
in-place and the alternatives don't work in the general case:
- Using a new class that looks and behaves like the original doesn't
work because the __new__ method of extension types usually makes sure
that it's the same class or a subclass.
- Subclassing the class and returning the subclass can lead to problems
with pickle and will look weird in the Sphinx docs.
"""
cls.__doc__ = deprecate_doc(cls.__doc__, message)
if cls.__new__ is object.__new__:
cls.__init__ = deprecate_function(
get_function(cls.__init__), message, warning_type
)
else:
cls.__new__ = deprecate_function(
get_function(cls.__new__), message, warning_type
)
return cls
def deprecate(
obj,
message=message,
name=name,
alternative=alternative,
pending=pending,
warning_type=warning_type,
):
if obj_type is None:
if isinstance(obj, type):
obj_type_name = "class"
elif inspect.isfunction(obj):
obj_type_name = "function"
elif inspect.ismethod(obj) or isinstance(obj, method_types):
obj_type_name = "method"
else:
obj_type_name = "object"
else:
obj_type_name = obj_type
if not name:
name = get_function(obj).__name__
altmessage = ""
if not message or type(message) is type(deprecate):
if pending:
message = (
"The {func} {obj_type} will be deprecated in a future version."
)
else:
message = (
"The {func} {obj_type} is deprecated and may "
"be removed in a future version."
)
if alternative:
altmessage = f"\n Use {alternative} instead."
message = (
message.format(
**{
"func": name,
"name": name,
"alternative": alternative,
"obj_type": obj_type_name,
}
)
) + altmessage
if isinstance(obj, type):
return deprecate_class(obj, message, warning_type)
else:
return deprecate_function(obj, message, warning_type)
if type(message) is type(deprecate):
return deprecate(message)
return deprecate
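# A minimal usage sketch (the function names are hypothetical):
#
#     @deprecated("5.0", alternative="new_function")
#     def old_function():
#         ...
#
# Calling ``old_function()`` then emits an AstropyDeprecationWarning pointing
# users at ``new_function``.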
def deprecated_attribute(
name,
since,
message=None,
alternative=None,
pending=False,
warning_type=AstropyDeprecationWarning,
):
"""
Used to mark a public attribute as deprecated. This creates a
property that will warn when the given attribute name is accessed.
To prevent the warning (i.e. for internal code), use the private
name for the attribute by prepending an underscore
(i.e. ``self._name``), or set an alternative explicitly.
Parameters
----------
name : str
The name of the deprecated attribute.
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``name`` may be used for the name of the attribute,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function.
alternative : str, optional
An alternative attribute that the user may use in place of the
deprecated attribute. The deprecation warning will tell the
user about this alternative if provided.
pending : bool, optional
If True, uses an AstropyPendingDeprecationWarning instead of
``warning_type``.
warning_type : Warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
Examples
--------
::
class MyClass:
# Mark the old_name as deprecated
old_name = deprecated_attribute("old_name", "0.1")
def method(self):
self._old_name = 42
class MyClass2:
old_name = deprecated_attribute(
"old_name", "1.2", alternative="new_name"
)
def method(self):
self.new_name = 24
"""
private_name = alternative or "_" + name
specific_deprecated = deprecated(
since,
name=name,
obj_type="attribute",
message=message,
alternative=alternative,
pending=pending,
warning_type=warning_type,
)
@specific_deprecated
def get(self):
return getattr(self, private_name)
@specific_deprecated
def set(self, val):
setattr(self, private_name, val)
@specific_deprecated
def delete(self):
delattr(self, private_name)
return property(get, set, delete)
def deprecated_renamed_argument(
old_name,
new_name,
since,
arg_in_kwargs=False,
relax=False,
pending=False,
warning_type=AstropyDeprecationWarning,
alternative="",
message="",
):
"""Deprecate a _renamed_ or _removed_ function argument.
The decorator assumes that the argument with the ``old_name`` was removed
from the function signature and the ``new_name`` replaced it at the
**same position** in the signature. If the ``old_name`` argument is
given when calling the decorated function the decorator will catch it and
issue a deprecation warning and pass it on as ``new_name`` argument.
Parameters
----------
old_name : str or sequence of str
The old name of the argument.
new_name : str or sequence of str or None
The new name of the argument. Set this to `None` to remove the
argument ``old_name`` instead of renaming it.
since : str or number or sequence of str or number
The release at which the old argument became deprecated.
arg_in_kwargs : bool or sequence of bool, optional
If the argument is not a named argument (for example it
was meant to be consumed by ``**kwargs``) set this to
``True``. Otherwise the decorator will throw an Exception
if the ``new_name`` cannot be found in the signature of
the decorated function.
Default is ``False``.
relax : bool or sequence of bool, optional
If ``False`` a ``TypeError`` is raised if both ``new_name`` and
``old_name`` are given. If ``True`` the value for ``new_name`` is used
and a Warning is issued.
Default is ``False``.
pending : bool or sequence of bool, optional
If ``True`` this will hide the deprecation warning and ignore the
corresponding ``relax`` parameter value.
Default is ``False``.
warning_type : Warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object if ``new_name`` is None. The deprecation
warning will tell the user about this alternative if provided.
message : str, optional
A custom warning message. If provided then ``since`` and
``alternative`` options will have no effect.
Raises
------
TypeError
If the new argument name cannot be found in the function
signature and arg_in_kwargs was False or if it is used to
deprecate the name of the ``*args``-, ``**kwargs``-like arguments.
At runtime such an Error is raised if both the new_name
and old_name were specified when calling the function and
"relax=False".
Notes
-----
The decorator should be applied to a function where the **name**
of an argument was changed but it still applies the same logic.
.. warning::
If ``old_name`` is a list or tuple the ``new_name`` and ``since`` must
also be a list or tuple with the same number of entries. ``relax`` and
``arg_in_kwarg`` can be a single bool (applied to all) or also a
list/tuple with the same number of entries like ``new_name``, etc.
Examples
--------
The deprecation warnings are not shown in the following examples.
To deprecate a positional or keyword argument::
>>> from astropy.utils.decorators import deprecated_renamed_argument
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0')
... def test(sigma):
... return sigma
>>> test(2)
2
>>> test(sigma=2)
2
>>> test(sig=2) # doctest: +SKIP
2
To deprecate an argument caught inside the ``**kwargs`` the
``arg_in_kwargs`` has to be set::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0',
... arg_in_kwargs=True)
... def test(**kwargs):
... return kwargs['sigma']
>>> test(sigma=2)
2
>>> test(sig=2) # doctest: +SKIP
2
By default providing the new and old keyword will lead to an Exception. If
a Warning is desired set the ``relax`` argument::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True)
... def test(sigma):
... return sigma
>>> test(sig=2) # doctest: +SKIP
2
It is also possible to replace multiple arguments. The ``old_name``,
``new_name`` and ``since`` have to be `tuple` or `list` and contain the
same number of entries::
>>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'],
... ['1.0', 1.2])
... def test(alpha, beta):
... return alpha, beta
>>> test(a=2, b=3) # doctest: +SKIP
(2, 3)
In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which
is applied to all renamed arguments) or must also be a `tuple` or `list`
with values for each of the arguments.
"""
cls_iter = (list, tuple)
if isinstance(old_name, cls_iter):
n = len(old_name)
# Assume that new_name and since are correct (tuple/list with the
# appropriate length) in the spirit of the "consenting adults". But the
# optional parameters may not be set, so if these are not iterables
# wrap them.
if not isinstance(arg_in_kwargs, cls_iter):
arg_in_kwargs = [arg_in_kwargs] * n
if not isinstance(relax, cls_iter):
relax = [relax] * n
if not isinstance(pending, cls_iter):
pending = [pending] * n
if not isinstance(message, cls_iter):
message = [message] * n
else:
# To allow a uniform approach later on, wrap all arguments in lists.
n = 1
old_name = [old_name]
new_name = [new_name]
since = [since]
arg_in_kwargs = [arg_in_kwargs]
relax = [relax]
pending = [pending]
message = [message]
def decorator(function):
# The named arguments of the function.
arguments = signature(function).parameters
keys = list(arguments.keys())
position = [None] * n
for i in range(n):
# Determine the position of the argument.
if arg_in_kwargs[i]:
pass
else:
if new_name[i] is None:
param = arguments[old_name[i]]
elif new_name[i] in arguments:
param = arguments[new_name[i]]
# In case the argument is not found in the list of arguments
# the only remaining possibility is that it should be caught
# by some kind of **kwargs argument.
# This case has to be explicitly specified, otherwise throw
# an exception!
else:
raise TypeError(
f'"{new_name[i]}" was not specified in the function '
"signature. If it was meant to be part of "
'"**kwargs" then set "arg_in_kwargs" to "True"'
)
# There are several possibilities now:
# 1.) Positional or keyword argument:
if param.kind == param.POSITIONAL_OR_KEYWORD:
if new_name[i] is None:
position[i] = keys.index(old_name[i])
else:
position[i] = keys.index(new_name[i])
# 2.) Keyword only argument:
elif param.kind == param.KEYWORD_ONLY:
# These cannot be specified by position.
position[i] = None
# 3.) positional-only argument, varargs, varkwargs or some
# unknown type:
else:
raise TypeError(
f'cannot replace argument "{new_name[i]}" '
f"of kind {repr(param.kind)}."
)
@functools.wraps(function)
def wrapper(*args, **kwargs):
for i in range(n):
msg = message[i] or (
f'"{old_name[i]}" was deprecated in '
f"version {since[i]} and will be removed "
"in a future version. "
)
# The only way to have oldkeyword inside the function is
# that it is passed as kwarg because the oldkeyword
# parameter was renamed to newkeyword.
if old_name[i] in kwargs:
value = kwargs.pop(old_name[i])
# Display the deprecation warning only when it's not
# pending.
if not pending[i]:
if not message[i]:
if new_name[i] is not None:
msg += f'Use argument "{new_name[i]}" instead.'
elif alternative:
msg += f"\n Use {alternative} instead."
warnings.warn(msg, warning_type, stacklevel=2)
# Check if the newkeyword was given as well.
newarg_in_args = position[i] is not None and len(args) > position[i]
newarg_in_kwargs = new_name[i] in kwargs
if newarg_in_args or newarg_in_kwargs:
if not pending[i]:
# If both are given print a Warning if relax is
# True or raise an Exception if relax is False.
if relax[i]:
warnings.warn(
f'"{old_name[i]}" and "{new_name[i]}" '
"keywords were set. "
f'Using the value of "{new_name[i]}".',
AstropyUserWarning,
)
else:
raise TypeError(
f'cannot specify both "{old_name[i]}" and '
f'"{new_name[i]}".'
)
else:
# Pass the value of the old argument with the
# name of the new argument to the function
if new_name[i] is not None:
kwargs[new_name[i]] = value
# If old argument has no replacement, cast it back.
# https://github.com/astropy/astropy/issues/9914
else:
kwargs[old_name[i]] = value
# Deprecated keyword without replacement is given as
# positional argument.
elif (
not pending[i]
and not new_name[i]
and position[i]
and len(args) > position[i]
):
if alternative and not message[i]:
msg += f"\n Use {alternative} instead."
warnings.warn(msg, warning_type, stacklevel=2)
return function(*args, **kwargs)
return wrapper
return decorator
# TODO: This can still be made to work for setters by implementing an
# accompanying metaclass that supports it; we just don't need that right this
# second
class classproperty(property):
"""
Similar to `property`, but allows class-level properties. That is,
a property whose getter is like a `classmethod`.
The wrapped method may explicitly use the `classmethod` decorator (which
must come before this decorator), or the `classmethod` may be omitted
(it is implicit through use of this decorator).
.. note::
classproperty only works for *read-only* properties. It does not
currently allow writeable/deletable properties, due to subtleties of how
Python descriptors work. In order to implement such properties on a class
a metaclass for that class must be implemented.
Parameters
----------
fget : callable
The function that computes the value of this property (in particular,
the function when this is used as a decorator) a la `property`.
doc : str, optional
The docstring for the property--by default inherited from the getter
function.
lazy : bool, optional
If True, caches the value returned by the first call to the getter
function, so that it is only called once (used for lazy evaluation
of an attribute). This is analogous to `lazyproperty`. The ``lazy``
argument can also be used when `classproperty` is used as a decorator
(see the third example below). When used in the decorator syntax this
*must* be passed in as a keyword argument.
Examples
--------
::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal + 1
...
>>> Foo.bar
2
>>> foo_instance = Foo()
>>> foo_instance.bar
2
>>> foo_instance._bar_internal = 2
>>> foo_instance.bar # Ignores instance attributes
2
As previously noted, a `classproperty` is limited to implementing
read-only attributes::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal
... @bar.setter
... def bar(cls, value):
... cls._bar_internal = value
...
Traceback (most recent call last):
...
NotImplementedError: classproperty can only be read-only; use a
metaclass to implement modifiable class-level properties
When the ``lazy`` option is used, the getter is only called once::
>>> class Foo:
... @classproperty(lazy=True)
... def bar(cls):
... print("Performing complicated calculation")
... return 1
...
>>> Foo.bar
Performing complicated calculation
1
>>> Foo.bar
1
If a subclass inherits a lazy `classproperty` the property is still
re-evaluated for the subclass::
>>> class FooSub(Foo):
... pass
...
>>> FooSub.bar
Performing complicated calculation
1
>>> FooSub.bar
1
"""
def __new__(cls, fget=None, doc=None, lazy=False):
if fget is None:
# Being used as a decorator--return a wrapper that implements
# decorator syntax
def wrapper(func):
return cls(func, lazy=lazy)
return wrapper
return super().__new__(cls)
def __init__(self, fget, doc=None, lazy=False):
self._lazy = lazy
if lazy:
self._lock = threading.RLock() # Protects _cache
self._cache = {}
fget = self._wrap_fget(fget)
super().__init__(fget=fget, doc=doc)
# There is a buglet in Python where self.__doc__ doesn't
# get set properly on instances of property subclasses if
# the doc argument was used rather than taking the docstring
# from fget
# Related Python issue: https://bugs.python.org/issue24766
if doc is not None:
self.__doc__ = doc
def __get__(self, obj, objtype):
if self._lazy:
val = self._cache.get(objtype, _NotFound)
if val is _NotFound:
with self._lock:
# Check if another thread initialised before we locked.
val = self._cache.get(objtype, _NotFound)
if val is _NotFound:
val = self.fget.__wrapped__(objtype)
self._cache[objtype] = val
else:
# The base property.__get__ will just return self here;
# instead we pass objtype through to the original wrapped
# function (which takes the class as its sole argument)
val = self.fget.__wrapped__(objtype)
return val
def getter(self, fget):
return super().getter(self._wrap_fget(fget))
def setter(self, fset):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties"
)
def deleter(self, fdel):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties"
)
@staticmethod
def _wrap_fget(orig_fget):
if isinstance(orig_fget, classmethod):
orig_fget = orig_fget.__func__
# Using stock functools.wraps instead of the fancier version
# found later in this module, which is overkill for this purpose
@functools.wraps(orig_fget)
def fget(obj):
return orig_fget(obj.__class__)
return fget
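# A small sketch of the explicit-``classmethod`` form mentioned in the class
# docstring (``classmethod`` is applied first, i.e. written below
# ``classproperty``); the class and attribute names are hypothetical:
#
#     class Config:
#         @classproperty
#         @classmethod
#         def default(cls):
#             return cls.__name__.lower()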
# Adapted from the recipe at
# http://code.activestate.com/recipes/363602-lazy-property-evaluation
class lazyproperty(property):
"""
Works similarly to property(), but computes the value only once.
This essentially memoizes the value of the property by storing the result
of its computation in the ``__dict__`` of the object instance. This is
useful for computing the value of some property that should otherwise be
invariant. For example::
>>> class LazyTest:
... @lazyproperty
... def complicated_property(self):
... print('Computing the value for complicated_property...')
... return 42
...
>>> lt = LazyTest()
>>> lt.complicated_property
Computing the value for complicated_property...
42
>>> lt.complicated_property
42
As the example shows, the second time ``complicated_property`` is accessed,
the ``print`` statement is not executed. Only the return value from the
first access of ``complicated_property`` is returned.
By default, a setter and deleter are used which simply overwrite and
delete, respectively, the value stored in ``__dict__``. Any user-specified
setter or deleter is executed before executing these default actions.
The one exception is that the default setter is not run if the user
setter already stores the new value in ``__dict__`` and returns that
same (non-``None``) value.
"""
def __init__(self, fget, fset=None, fdel=None, doc=None):
super().__init__(fget, fset, fdel, doc)
self._key = self.fget.__name__
self._lock = threading.RLock()
def __get__(self, obj, owner=None):
try:
obj_dict = obj.__dict__
val = obj_dict.get(self._key, _NotFound)
if val is _NotFound:
with self._lock:
# Check if another thread beat us to it.
val = obj_dict.get(self._key, _NotFound)
if val is _NotFound:
val = self.fget(obj)
obj_dict[self._key] = val
return val
except AttributeError:
if obj is None:
return self
raise
def __set__(self, obj, val):
obj_dict = obj.__dict__
if self.fset:
ret = self.fset(obj, val)
if ret is not None and obj_dict.get(self._key) is ret:
# By returning the value set the setter signals that it
# took over setting the value in obj.__dict__; this
# mechanism allows it to override the input value
return
obj_dict[self._key] = val
def __delete__(self, obj):
if self.fdel:
self.fdel(obj)
obj.__dict__.pop(self._key, None) # Delete if present
class sharedmethod(classmethod):
"""
This is a method decorator that allows both an instancemethod and a
`classmethod` to share the same name.
When using `sharedmethod` on a method defined in a class's body, it
may be called on an instance, or on a class. In the former case it
behaves like a normal instance method (a reference to the instance is
automatically passed as the first ``self`` argument of the method)::
>>> class Example:
... @sharedmethod
... def identify(self, *args):
... print('self was', self)
... print('additional args were', args)
...
>>> ex = Example()
>>> ex.identify(1, 2)
self was <astropy.utils.decorators.Example object at 0x...>
additional args were (1, 2)
In the latter case, when the `sharedmethod` is called directly from a
class, it behaves like a `classmethod`::
>>> Example.identify(3, 4)
self was <class 'astropy.utils.decorators.Example'>
additional args were (3, 4)
This also supports a more advanced usage, where the `classmethod`
implementation can be written separately. If the class's *metaclass*
has a method of the same name as the `sharedmethod`, the version on
the metaclass is delegated to::
>>> class ExampleMeta(type):
... def identify(self):
... print('this implements the {0}.identify '
... 'classmethod'.format(self.__name__))
...
>>> class Example(metaclass=ExampleMeta):
... @sharedmethod
... def identify(self):
... print('this implements the instancemethod')
...
>>> Example().identify()
this implements the instancemethod
>>> Example.identify()
this implements the Example.identify classmethod
"""
def __get__(self, obj, objtype=None):
if obj is None:
mcls = type(objtype)
clsmeth = getattr(mcls, self.__func__.__name__, None)
if callable(clsmeth):
func = clsmeth
else:
func = self.__func__
return self._make_method(func, objtype)
else:
return self._make_method(self.__func__, obj)
@staticmethod
def _make_method(func, instance):
return types.MethodType(func, instance)
def format_doc(docstring, *args, **kwargs):
"""
Replaces the docstring of the decorated object and then formats it.
The formatting works like :meth:`str.format` and if the decorated object
already has a docstring this docstring can be included in the new
documentation if you use the ``{__doc__}`` placeholder.
Its primary use is for reusing a *long* docstring in multiple functions
when it is the same or only slightly different between them.
Parameters
----------
docstring : str or object or None
The docstring that will replace the docstring of the decorated
object. If it is an object like a function or class it will
take the docstring of this object. If it is a string it will use the
string itself. One special case is if the string is ``None`` then
        it will use the decorated function's docstring and format it.
args :
passed to :meth:`str.format`.
kwargs :
passed to :meth:`str.format`. If the function has a (not empty)
docstring the original docstring is added to the kwargs with the
keyword ``'__doc__'``.
Raises
------
ValueError
If the ``docstring`` (or interpreted docstring if it was ``None``
or not a string) is empty.
IndexError, KeyError
        If a placeholder in the (interpreted) ``docstring`` was not filled; see
:meth:`str.format` for more information.
Notes
-----
    Using this decorator allows, for example, Sphinx to parse the
correct docstring.
Examples
--------
Replacing the current docstring is very easy::
>>> from astropy.utils.decorators import format_doc
>>> @format_doc('''Perform num1 + num2''')
... def add(num1, num2):
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform num1 + num2
sometimes instead of replacing you only want to add to it::
>>> doc = '''
... {__doc__}
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... '''
>>> @format_doc(doc)
... def add(num1, num2):
... '''Perform addition.'''
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
in case one might want to format it further::
>>> doc = '''
... Perform {0}.
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... result of num1 {op} num2
... {__doc__}
... '''
>>> @format_doc(doc, 'addition', op='+')
... def add(num1, num2):
... return num1+num2
...
>>> @format_doc(doc, 'subtraction', op='-')
... def subtract(num1, num2):
... '''Notes: This one has additional notes.'''
... return num1-num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
>>> help(subtract) # doctest: +SKIP
Help on function subtract in module __main__:
<BLANKLINE>
subtract(num1, num2)
Perform subtraction.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 - num2
Notes : This one has additional notes.
These methods can be combined; even taking the docstring from another
    object is possible as the ``docstring`` argument. You just have to specify the
object::
>>> @format_doc(add)
... def another_add(num1, num2):
... return num1 + num2
...
>>> help(another_add) # doctest: +SKIP
Help on function another_add in module __main__:
<BLANKLINE>
another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
But be aware that this decorator *only* formats the given docstring not
the strings passed as ``args`` or ``kwargs`` (not even the original
docstring)::
>>> @format_doc(doc, 'addition', op='+')
... def yet_another_add(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(yet_another_add) # doctest: +SKIP
Help on function yet_another_add in module __main__:
<BLANKLINE>
yet_another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
This one is good for {0}.
To work around it you could specify the docstring to be ``None``::
>>> @format_doc(None, 'addition')
... def last_add_i_swear(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(last_add_i_swear) # doctest: +SKIP
Help on function last_add_i_swear in module __main__:
<BLANKLINE>
last_add_i_swear(num1, num2)
This one is good for addition.
    Using it with ``None`` as the docstring allows using the decorator twice
    on an object: first to parse the new docstring and then to parse the
    original docstring or the ``args`` and ``kwargs``.
"""
def set_docstring(obj):
if docstring is None:
# None means: use the objects __doc__
doc = obj.__doc__
# Delete documentation in this case so we don't end up with
# awkwardly self-inserted docs.
obj.__doc__ = None
elif isinstance(docstring, str):
# String: use the string that was given
doc = docstring
else:
# Something else: Use the __doc__ of this
doc = docstring.__doc__
if not doc:
# In case the docstring is empty it's probably not what was wanted.
raise ValueError(
"docstring must be a string or containing a "
"docstring that is not empty."
)
# If the original has a not-empty docstring append it to the format
# kwargs.
kwargs["__doc__"] = obj.__doc__ or ""
obj.__doc__ = doc.format(*args, **kwargs)
return obj
return set_docstring
|
0e6bd6825806263cb4a38f112e556e7b04ead3cc22706ffa0fa039dc0e6bfa53 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for accessing, downloading, and caching data files."""
import atexit
import contextlib
import errno
import fnmatch
import ftplib
import functools
import hashlib
import io
import os
import re
import shutil
# import ssl moved inside functions using ssl to avoid import failure
# when running in pyodide/Emscripten
import sys
import urllib.error
import urllib.parse
import urllib.request
import zipfile
from tempfile import NamedTemporaryFile, TemporaryDirectory, gettempdir, mkdtemp
from warnings import warn
try:
import certifi
except ImportError:
# certifi support is optional; when available it will be used for TLS/SSL
# downloads
certifi = None
import astropy.config.paths
from astropy import config as _config
from astropy.utils.compat.optional_deps import HAS_FSSPEC
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.introspection import find_current_module, resolve_name
# Order here determines order in the autosummary
__all__ = [
"Conf",
"conf",
"download_file",
"download_files_in_parallel",
"get_readable_fileobj",
"get_pkg_data_fileobj",
"get_pkg_data_filename",
"get_pkg_data_contents",
"get_pkg_data_fileobjs",
"get_pkg_data_filenames",
"get_pkg_data_path",
"is_url",
"is_url_in_cache",
"get_cached_urls",
"cache_total_size",
"cache_contents",
"export_download_cache",
"import_download_cache",
"import_file_to_cache",
"check_download_cache",
"clear_download_cache",
"compute_hash",
"get_free_space_in_dir",
"check_free_space_in_dir",
"get_file_contents",
"CacheMissingWarning",
"CacheDamaged",
]
_dataurls_to_alias = {}
class _NonClosingBufferedReader(io.BufferedReader):
def __del__(self):
try:
# NOTE: self.raw will not be closed, but left in the state
            # it was in at detachment
self.detach()
except Exception:
pass
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __del__(self):
try:
# NOTE: self.stream will not be closed, but left in the state
            # it was in at detachment
self.detach()
except Exception:
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.data`.
"""
dataurl = _config.ConfigItem(
"http://data.astropy.org/", "Primary URL for astropy remote data site."
)
dataurl_mirror = _config.ConfigItem(
"http://www.astropy.org/astropy-data/",
"Mirror URL for astropy remote data site.",
)
default_http_user_agent = _config.ConfigItem(
"astropy",
"Default User-Agent for HTTP request headers. This can be overwritten "
"for a particular call via http_headers option, where available. "
"This only provides the default value when not set by https_headers.",
)
remote_timeout = _config.ConfigItem(
10.0,
"Time to wait for remote data queries (in seconds).",
aliases=["astropy.coordinates.name_resolve.name_resolve_timeout"],
)
allow_internet = _config.ConfigItem(
True, "If False, prevents any attempt to download from Internet."
)
compute_hash_block_size = _config.ConfigItem(
2**16, "Block size for computing file hashes." # 64K
)
download_block_size = _config.ConfigItem(
2**16, "Number of bytes of remote data to download per step." # 64K
)
delete_temporary_downloads_at_exit = _config.ConfigItem(
True,
"If True, temporary download files created when the cache is "
"inaccessible will be deleted at the end of the python session.",
)
conf = Conf()
class CacheMissingWarning(AstropyWarning):
"""
This warning indicates the standard cache directory is not accessible, with
the first argument providing the warning message. If args[1] is present, it
is a filename indicating the path to a temporary file that was created to
store a remote data download in the absence of the cache.
"""
def is_url(string):
"""
Test whether a string is a valid URL for :func:`download_file`.
Parameters
----------
string : str
The string to test.
Returns
-------
status : bool
String is URL or not.
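    Examples
    --------
    For example (the strings shown are purely illustrative)::
        >>> from astropy.utils.data import is_url
        >>> is_url('https://data.astropy.org/')
        True
        >>> is_url('local/file.dat')
        False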
"""
url = urllib.parse.urlparse(string)
# we can't just check that url.scheme is not an empty string, because
# file paths in windows would return a non-empty scheme (e.g. e:\\
# returns 'e').
return url.scheme.lower() in ["http", "https", "ftp", "sftp", "ssh", "file"]
# Backward compatibility because some downstream packages allegedly use it.
_is_url = is_url
def _requires_fsspec(url):
"""Does the `url` require the optional ``fsspec`` dependency to open?"""
return isinstance(url, str) and url.startswith(("s3://", "gs://"))
def _is_inside(path, parent_path):
# We have to try realpath too to avoid issues with symlinks, but we leave
# abspath because some systems like debian have the absolute path (with no
# symlinks followed) match, but the real directories in different
# locations, so need to try both cases.
return os.path.abspath(path).startswith(
os.path.abspath(parent_path)
) or os.path.realpath(path).startswith(os.path.realpath(parent_path))
@contextlib.contextmanager
def get_readable_fileobj(
name_or_obj,
encoding=None,
cache=False,
show_progress=True,
remote_timeout=None,
sources=None,
http_headers=None,
*,
use_fsspec=None,
fsspec_kwargs=None,
close_files=True,
):
"""Yield a readable, seekable file-like object from a file or URL.
This supports passing filenames, URLs, and readable file-like objects,
any of which can be compressed in gzip, bzip2 or lzma (xz) if the
appropriate compression libraries are provided by the Python installation.
Notes
-----
This function is a context manager, and should be used for example
as::
with get_readable_fileobj('file.dat') as f:
contents = f.read()
If a URL is provided and the cache is in use, the provided URL will be the
name used in the cache. The contents may already be stored in the cache
    under this URL, they may be downloaded from this URL, or they may
be downloaded from one of the locations listed in ``sources``. See
`~download_file` for details.
Parameters
----------
name_or_obj : str or file-like
The filename of the file to access (if given as a string), or
the file-like object to access.
If a file-like object, it must be opened in binary mode.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
check the remote URL for a new version but store the result
in the cache.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
use_fsspec : bool, optional
Use `fsspec.open` to open the file? Defaults to `False` unless
``name_or_obj`` starts with the Amazon S3 storage prefix ``s3://``
or the Google Cloud Storage prefix ``gs://``. Can also be used for paths
with other prefixes (e.g. ``http://``) but in this case you must
        explicitly pass ``use_fsspec=True``.
Use of this feature requires the optional ``fsspec`` package.
A ``ModuleNotFoundError`` will be raised if the dependency is missing.
.. versionadded:: 5.2
fsspec_kwargs : dict, optional
Keyword arguments passed on to `fsspec.open`. This can be used to
configure cloud storage credentials and caching behavior.
For example, pass ``fsspec_kwargs={"anon": True}`` to enable
anonymous access to Amazon S3 open data buckets.
See ``fsspec``'s documentation for available parameters.
.. versionadded:: 5.2
close_files : bool, optional
Close the file object when exiting the context manager.
Default is `True`.
.. versionadded:: 5.2
Returns
-------
file : readable file-like
"""
# close_fds is a list of file handles created by this function
# that need to be closed. We don't want to always just close the
# returned file handle, because it may simply be the file handle
# passed in. In that case it is not the responsibility of this
# function to close it: doing so could result in a "double close"
# and an "invalid file descriptor" exception.
close_fds = []
delete_fds = []
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
# Have `use_fsspec` default to ``True`` if the user passed an Amazon S3
# or Google Cloud Storage URI.
if use_fsspec is None and _requires_fsspec(name_or_obj):
use_fsspec = True
if use_fsspec:
if not isinstance(name_or_obj, str):
raise TypeError("`name_or_obj` must be a string when `use_fsspec=True`")
if fsspec_kwargs is None:
fsspec_kwargs = {}
# name_or_obj could be an os.PathLike object
if isinstance(name_or_obj, os.PathLike):
name_or_obj = os.fspath(name_or_obj)
# Get a file object to the content
if isinstance(name_or_obj, str):
# Use fsspec to open certain cloud-hosted files (e.g., AWS S3, Google Cloud Storage)
if use_fsspec:
if not HAS_FSSPEC:
raise ModuleNotFoundError("please install `fsspec` to open this file")
import fsspec # local import because it is a niche dependency
openfileobj = fsspec.open(name_or_obj, **fsspec_kwargs)
close_fds.append(openfileobj)
fileobj = openfileobj.open()
close_fds.append(fileobj)
else:
is_url = _is_url(name_or_obj)
if is_url:
name_or_obj = download_file(
name_or_obj,
cache=cache,
show_progress=show_progress,
timeout=remote_timeout,
sources=sources,
http_headers=http_headers,
)
fileobj = io.FileIO(name_or_obj, "r")
if is_url and not cache:
delete_fds.append(fileobj)
close_fds.append(fileobj)
else:
fileobj = name_or_obj
# Check if the file object supports random access, and if not,
# then wrap it in a BytesIO buffer. It would be nicer to use a
    # BufferedReader to avoid loading the whole file first,
# but that might not be compatible with all possible I/O classes.
if not hasattr(fileobj, "seek"):
try:
# py.path.LocalPath objects have .read() method but it uses
# text mode, which won't work. .read_binary() does, and
# surely other ducks would return binary contents when
# called like this.
# py.path.LocalPath is what comes from the legacy tmpdir fixture
# in pytest.
fileobj = io.BytesIO(fileobj.read_binary())
except AttributeError:
fileobj = io.BytesIO(fileobj.read())
# Now read enough bytes to look at signature
signature = fileobj.read(4)
fileobj.seek(0)
if signature[:3] == b"\x1f\x8b\x08": # gzip
import struct
try:
import gzip
fileobj_new = gzip.GzipFile(fileobj=fileobj, mode="rb")
fileobj_new.read(1) # need to check that the file is really gzip
except (OSError, EOFError, struct.error): # invalid gzip file
fileobj.seek(0)
fileobj_new.close()
else:
fileobj_new.seek(0)
fileobj = fileobj_new
elif signature[:3] == b"BZh": # bzip2
try:
import bz2
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
try:
# bz2.BZ2File does not support file objects, only filenames, so we
# need to write the data to a temporary file
with NamedTemporaryFile("wb", delete=False) as tmp:
tmp.write(fileobj.read())
tmp.close()
fileobj_new = bz2.BZ2File(tmp.name, mode="rb")
fileobj_new.read(1) # need to check that the file is really bzip2
except OSError: # invalid bzip2 file
fileobj.seek(0)
fileobj_new.close()
# raise
else:
fileobj_new.seek(0)
close_fds.append(fileobj_new)
fileobj = fileobj_new
elif signature[:3] == b"\xfd7z": # xz
try:
import lzma
fileobj_new = lzma.LZMAFile(fileobj, mode="rb")
fileobj_new.read(1) # need to check that the file is really xz
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the lzma module."
)
except (OSError, EOFError): # invalid xz file
fileobj.seek(0)
fileobj_new.close()
# should we propagate this to the caller to signal bad content?
# raise ValueError(e)
else:
fileobj_new.seek(0)
fileobj = fileobj_new
# By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File
# or lzma.LZMAFile instance opened in binary mode (that is, read
# returns bytes). Now we need to, if requested, wrap it in a
# io.TextIOWrapper so read will return unicode based on the
# encoding parameter.
needs_textio_wrapper = encoding != "binary"
if needs_textio_wrapper:
# A bz2.BZ2File can not be wrapped by a TextIOWrapper,
# so we decompress it to a temporary file and then
# return a handle to that.
try:
import bz2
except ImportError:
pass
else:
if isinstance(fileobj, bz2.BZ2File):
tmp = NamedTemporaryFile("wb", delete=False)
data = fileobj.read()
tmp.write(data)
tmp.close()
delete_fds.append(tmp)
fileobj = io.FileIO(tmp.name, "r")
close_fds.append(fileobj)
fileobj = _NonClosingBufferedReader(fileobj)
fileobj = _NonClosingTextIOWrapper(fileobj, encoding=encoding)
# Ensure that file is at the start - io.FileIO will for
# example not always be at the start:
# >>> import io
# >>> f = open('test.fits', 'rb')
# >>> f.read(4)
# 'SIMP'
# >>> f.seek(0)
# >>> fileobj = io.FileIO(f.fileno())
# >>> fileobj.tell()
# 4096L
fileobj.seek(0)
try:
yield fileobj
finally:
if close_files:
for fd in close_fds:
fd.close()
for fd in delete_fds:
os.remove(fd.name)
def get_file_contents(*args, **kwargs):
"""
Retrieves the contents of a filename or file-like object.
See the `get_readable_fileobj` docstring for details on parameters.
Returns
-------
object
The content of the file (as requested by ``encoding``).
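    Examples
    --------
    A minimal sketch; the filename below is hypothetical::
        >>> from astropy.utils.data import get_file_contents
        >>> get_file_contents('local_table.csv')  # doctest: +SKIP
        'a,b\n1,2\n'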
"""
with get_readable_fileobj(*args, **kwargs) as f:
return f.read()
@contextlib.contextmanager
def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations for the package and
provides the file as a file-like object that reads bytes.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
              ``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
        ``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
fileobj : file-like
An object with the contents of the data file available via
``read`` function. Can be used as part of a ``with`` statement,
automatically closing itself after the ``with`` block.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Examples
--------
This will retrieve a data file and its contents for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('data/3d_cd.hdr',
... package='astropy.wcs.tests') as fobj:
... fcontents = fobj.read()
...
This next example would download a data file from the astropy data server
because the ``allsky/allsky_rosat.fits`` file is not present in the
source distribution. It will also save the file locally so the
    next time it is accessed it won't need to be downloaded::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
This does the same thing but does *not* cache it locally::
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_filename : returns a local name for a file containing the data
""" # noqa: E501
datafn = get_pkg_data_path(data_name, package=package)
if os.path.isdir(datafn):
raise OSError(
"Tried to access a data file that's actually a package data directory"
)
elif os.path.isfile(datafn): # local file
with get_readable_fileobj(datafn, encoding=encoding) as fileobj:
yield fileobj
else: # remote file
with get_readable_fileobj(
conf.dataurl + data_name,
encoding=encoding,
cache=cache,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
) as fileobj:
# We read a byte to trigger any URLErrors
fileobj.read(1)
fileobj.seek(0)
yield fileobj
def get_pkg_data_filename(
data_name, package=None, show_progress=True, remote_timeout=None
):
"""
Retrieves a data file from the standard locations for the package and
provides a local filename for the data.
This function is similar to `get_pkg_data_fileobj` but returns the
file *name* instead of a readable file-like object. This means
that this function must always cache remote files locally, unlike
`get_pkg_data_fileobj`.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
              ``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for the requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Returns
-------
filename : str
A file path on the local file system corresponding to the data
requested in ``data_name``.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('data/3d_cd.hdr',
... package='astropy.wcs.tests')
>>> with open(fn) as f:
... fcontents = f.read()
...
This retrieves a data file by hash either locally or from the astropy data
server::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP
>>> with open(fn) as f:
... fcontents = f.read()
...
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_fileobj : returns a file-like object with the data
"""
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
if data_name.startswith("hash/"):
# first try looking for a local version if a hash is specified
hashfn = _find_hash_fn(data_name[5:])
if hashfn is None:
return download_file(
conf.dataurl + data_name,
cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
)
else:
return hashfn
else:
fs_path = os.path.normpath(data_name)
datafn = get_pkg_data_path(fs_path, package=package)
if os.path.isdir(datafn):
raise OSError(
"Tried to access a data file that's actually a package data directory"
)
elif os.path.isfile(datafn): # local file
return datafn
else: # remote file
return download_file(
conf.dataurl + data_name,
cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
)
def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations and returns its
contents as a bytes object.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
              ``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
* A URL to some other file.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
contents : bytes
The complete contents of the file as a bytes object.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
See Also
--------
get_pkg_data_fileobj : returns a file-like object with the data
get_pkg_data_filename : returns a local name for a file containing the data
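    Examples
    --------
    This mirrors the `get_pkg_data_fileobj` example above, but returns the
    contents directly::
        >>> from astropy.utils.data import get_pkg_data_contents
        >>> contents = get_pkg_data_contents('data/3d_cd.hdr',
        ...                                  package='astropy.wcs.tests',
        ...                                  encoding='binary')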
"""
with get_pkg_data_fileobj(
data_name, package=package, encoding=encoding, cache=cache
) as fd:
contents = fd.read()
return contents
def get_pkg_data_filenames(datadir, package=None, pattern="*"):
"""
Returns the path of all of the data files in a given directory
that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
              ``astropy.pkgname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``.
* Remote URLs are not currently supported.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
Returns
-------
filenames : iterator of str
Paths on the local filesystem in *datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filenames
>>> for fn in get_pkg_data_filenames('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... with open(fn) as f:
... fcontents = f.read()
...
"""
path = get_pkg_data_path(datadir, package=package)
if os.path.isfile(path):
raise OSError(
"Tried to access a data directory that's actually a package data file"
)
elif os.path.isdir(path):
for filename in os.listdir(path):
if fnmatch.fnmatch(filename, pattern):
yield os.path.join(path, filename)
else:
raise OSError("Path not found")
def get_pkg_data_fileobjs(datadir, package=None, pattern="*", encoding=None):
"""
Returns readable file objects for all of the data files in a given
directory that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
              ``astropy.pkgname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``
* Remote URLs are not currently supported
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
Returns
-------
fileobjs : iterator of file object
File objects for each of the files on the local filesystem in
*datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filenames
>>> for fd in get_pkg_data_fileobjs('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... fcontents = fd.read()
...
"""
for fn in get_pkg_data_filenames(datadir, package=package, pattern=pattern):
with get_readable_fileobj(fn, encoding=encoding) as fd:
yield fd
def compute_hash(localfn):
"""Computes the MD5 hash for a file.
The hash for a data file is used for looking up data files in a unique
fashion. This is of particular use for tests; a test may require a
particular version of a particular file, in which case it can be accessed
via hash to get the appropriate version.
Typically, if you wish to write a test that requires a particular data
file, you will want to submit that file to the astropy data servers, and
use
e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``,
but with the hash for your file in place of the hash in the example.
Parameters
----------
localfn : str
The path to the file for which the hash should be generated.
Returns
-------
hash : str
The hex digest of the cryptographic hash for the contents of the
``localfn`` file.
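    Examples
    --------
    For instance (the path and the resulting digest shown are only
    illustrative)::
        >>> from astropy.utils.data import compute_hash
        >>> compute_hash('/tmp/myfile.dat')  # doctest: +SKIP
        '34c33b3eb0d56eb9462003af249eff28'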
"""
with open(localfn, "rb") as f:
h = hashlib.md5()
block = f.read(conf.compute_hash_block_size)
while block:
h.update(block)
block = f.read(conf.compute_hash_block_size)
return h.hexdigest()
def get_pkg_data_path(*path, package=None):
"""Get path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings for ``os.path`` joining.
package : str or None, optional, keyword-only
If specified, look for a file relative to the given package, rather
than the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
Raises
------
ImportError
Given package or module is not importable.
RuntimeError
If the local data file is outside of the package's tree.
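    Examples
    --------
    A sketch of looking up a data file relative to an explicit package (the
    exact path returned depends on the installation)::
        >>> from astropy.utils.data import get_pkg_data_path
        >>> get_pkg_data_path('data', '3d_cd.hdr',
        ...                   package='astropy.wcs.tests')  # doctest: +SKIP
        '/.../astropy/wcs/tests/data/3d_cd.hdr'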
"""
if package is None:
module = find_current_module(1, finddiff=["astropy.utils.data", "contextlib"])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, "__package__") or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if "." in module.__name__:
package = module.__name__.rpartition(".")[0]
else:
package = module.__name__
else:
package = module.__package__
else:
# package errors if it isn't a str
# so there is no need for checks in the containing if/else
module = resolve_name(package)
# module path within package
module_path = os.path.dirname(module.__file__)
full_path = os.path.join(module_path, *path)
# Check that file is inside tree.
rootpkgname = package.partition(".")[0]
rootpkg = resolve_name(rootpkgname)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(full_path, root_dir):
raise RuntimeError(
f"attempted to get a local data file outside of the {rootpkgname} tree."
)
return full_path
def _find_hash_fn(hexdigest, pkgname="astropy"):
"""
Looks for a local file by hash - returns file name if found and a valid
file, otherwise returns None.
"""
for v in cache_contents(pkgname=pkgname).values():
if compute_hash(v) == hexdigest:
return v
return None
def get_free_space_in_dir(path, unit=False):
"""
Given a path to a directory, returns the amount of free space
on that filesystem.
Parameters
----------
path : str
The path to a directory.
unit : bool or `~astropy.units.Unit`
Return the amount of free space as Quantity in the given unit,
if provided. Default is `False` for backward-compatibility.
Returns
-------
free_space : int or `~astropy.units.Quantity`
The amount of free space on the partition that the directory is on.
If ``unit=False``, it is returned as plain integer (in bytes).
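    Examples
    --------
    The numbers below are only illustrative; the result depends on the
    filesystem being queried::
        >>> from astropy.utils.data import get_free_space_in_dir
        >>> get_free_space_in_dir('.')  # doctest: +SKIP
        102716932096
        >>> get_free_space_in_dir('.', unit=True)  # doctest: +SKIP
        <Quantity 1.02716932e+11 byte>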
"""
if not os.path.isdir(path):
raise OSError(
"Can only determine free space associated with directories, not files."
)
# Actually you can on Linux but I want to avoid code that fails
# on Windows only.
free_space = shutil.disk_usage(path).free
if unit:
from astropy import units as u
# TODO: Automatically determine best prefix to use.
if unit is True:
unit = u.byte
free_space = u.Quantity(free_space, u.byte).to(unit)
return free_space
def check_free_space_in_dir(path, size):
"""
Determines if a given directory has enough space to hold a file of
a given size.
Parameters
----------
path : str
The path to a directory.
size : int or `~astropy.units.Quantity`
A proposed filesize. If not a Quantity, assume it is in bytes.
Raises
------
OSError
There is not enough room on the filesystem.
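    Examples
    --------
    A minimal sketch (the size is illustrative); the call returns silently when
    there is enough room and raises `OSError` otherwise::
        >>> from astropy.utils.data import check_free_space_in_dir
        >>> check_free_space_in_dir('.', 1024)  # doctest: +SKIP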
"""
space = get_free_space_in_dir(path, unit=getattr(size, "unit", False))
if space < size:
from astropy.utils.console import human_file_size
raise OSError(
f"Not enough free space in {path} "
f"to download a {human_file_size(size)} file, "
f"only {human_file_size(space)} left"
)
class _ftptlswrapper(urllib.request.ftpwrapper):
def init(self):
self.busy = 0
self.ftp = ftplib.FTP_TLS()
self.ftp.connect(self.host, self.port, self.timeout)
self.ftp.login(self.user, self.passwd)
self.ftp.prot_p()
_target = "/".join(self.dirs)
self.ftp.cwd(_target)
class _FTPTLSHandler(urllib.request.FTPHandler):
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
return _ftptlswrapper(user, passwd, host, port, dirs, timeout, persistent=False)
@functools.lru_cache
def _build_urlopener(ftp_tls=False, ssl_context=None, allow_insecure=False):
"""
Helper for building a `urllib.request.build_opener` which handles TLS/SSL.
"""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
ssl_context = dict(it for it in ssl_context) if ssl_context else {}
cert_chain = {}
if "certfile" in ssl_context:
cert_chain.update(
{
"certfile": ssl_context.pop("certfile"),
"keyfile": ssl_context.pop("keyfile", None),
"password": ssl_context.pop("password", None),
}
)
elif "password" in ssl_context or "keyfile" in ssl_context:
raise ValueError(
"passing 'keyfile' or 'password' in the ssl_context argument "
"requires passing 'certfile' as well"
)
if "cafile" not in ssl_context and certifi is not None:
ssl_context["cafile"] = certifi.where()
ssl_context = ssl.create_default_context(**ssl_context)
if allow_insecure:
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
if cert_chain:
ssl_context.load_cert_chain(**cert_chain)
https_handler = urllib.request.HTTPSHandler(context=ssl_context)
if ftp_tls:
urlopener = urllib.request.build_opener(_FTPTLSHandler(), https_handler)
else:
urlopener = urllib.request.build_opener(https_handler)
return urlopener
def _try_url_open(
source_url,
timeout=None,
http_headers=None,
ftp_tls=False,
ssl_context=None,
allow_insecure=False,
):
"""Helper for opening a URL while handling TLS/SSL verification issues."""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
# Always try first with a secure connection
# _build_urlopener uses lru_cache, so the ssl_context argument must be
    # converted to a hashable type (a frozenset of 2-tuples)
ssl_context = frozenset(ssl_context.items() if ssl_context else [])
urlopener = _build_urlopener(
ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=False
)
req = urllib.request.Request(source_url, headers=http_headers)
try:
return urlopener.open(req, timeout=timeout)
except urllib.error.URLError as exc:
reason = exc.reason
if (
isinstance(reason, ssl.SSLError)
and reason.reason == "CERTIFICATE_VERIFY_FAILED"
):
msg = (
f"Verification of TLS/SSL certificate at {source_url} "
"failed: this can mean either the server is "
"misconfigured or your local root CA certificates are "
"out-of-date; in the latter case this can usually be "
'addressed by installing the Python package "certifi" '
"(see the documentation for astropy.utils.data.download_url)"
)
if not allow_insecure:
msg += (
" or in both cases you can work around this by "
"passing allow_insecure=True, but only if you "
"understand the implications; the original error "
f"was: {reason}"
)
raise urllib.error.URLError(msg)
else:
msg += ". Re-trying with allow_insecure=True."
warn(msg, AstropyWarning)
# Try again with a new urlopener allowing insecure connections
urlopener = _build_urlopener(
ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=True
)
return urlopener.open(req, timeout=timeout)
raise
def _download_file_from_source(
source_url,
show_progress=True,
timeout=None,
remote_url=None,
cache=False,
pkgname="astropy",
http_headers=None,
ftp_tls=None,
ssl_context=None,
allow_insecure=False,
):
from astropy.utils.console import ProgressBarOrSpinner
if not conf.allow_internet:
raise urllib.error.URLError(
f"URL {remote_url} was supposed to be downloaded but "
f"allow_internet is {conf.allow_internet}; "
"if this is unexpected check the astropy.cfg file for the option "
"allow_internet"
)
if remote_url is None:
remote_url = source_url
if http_headers is None:
http_headers = {}
if ftp_tls is None and urllib.parse.urlparse(remote_url).scheme == "ftp":
try:
return _download_file_from_source(
source_url,
show_progress=show_progress,
timeout=timeout,
remote_url=remote_url,
cache=cache,
pkgname=pkgname,
http_headers=http_headers,
ftp_tls=False,
)
except urllib.error.URLError as e:
# e.reason might not be a string, e.g. socket.gaierror
if str(e.reason).startswith("ftp error: error_perm"):
ftp_tls = True
else:
raise
with _try_url_open(
source_url,
timeout=timeout,
http_headers=http_headers,
ftp_tls=ftp_tls,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
) as remote:
info = remote.info()
try:
size = int(info["Content-Length"])
except (KeyError, ValueError, TypeError):
size = None
if size is not None:
check_free_space_in_dir(gettempdir(), size)
if cache:
dldir = _get_download_cache_loc(pkgname)
check_free_space_in_dir(dldir, size)
# If a user has overridden sys.stdout it might not have the
# isatty method, in that case assume it's not a tty
is_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
if show_progress and is_tty:
progress_stream = sys.stdout
else:
progress_stream = io.StringIO()
if source_url == remote_url:
dlmsg = f"Downloading {remote_url}"
else:
dlmsg = f"Downloading {remote_url} from {source_url}"
with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p:
with NamedTemporaryFile(
prefix=f"astropy-download-{os.getpid()}-", delete=False
) as f:
try:
bytes_read = 0
block = remote.read(conf.download_block_size)
while block:
f.write(block)
bytes_read += len(block)
p.update(bytes_read)
block = remote.read(conf.download_block_size)
if size is not None and bytes_read > size:
raise urllib.error.URLError(
f"File was supposed to be {size} bytes but "
f"server provides more, at least {bytes_read} "
"bytes. Download failed."
)
if size is not None and bytes_read < size:
raise urllib.error.ContentTooShortError(
f"File was supposed to be {size} bytes but we "
f"only got {bytes_read} bytes. Download failed.",
content=None,
)
except BaseException:
if os.path.exists(f.name):
try:
os.remove(f.name)
except OSError:
pass
raise
return f.name
def download_file(
remote_url,
cache=False,
show_progress=True,
timeout=None,
sources=None,
pkgname="astropy",
http_headers=None,
ssl_context=None,
allow_insecure=False,
):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ssl_context : dict, optional
Keyword arguments to pass to `ssl.create_default_context` when
        downloading from HTTPS or TLS+FTP sources. This can be used to provide
alternative paths to root CA certificates. Additionally, if the key
``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are
included, they are passed to `ssl.SSLContext.load_cert_chain`. This
can be used for performing SSL/TLS client certificate authentication
for servers that require it.
allow_insecure : bool, optional
Allow downloading files over a TLS/SSL connection even when the server
certificate verification failed. When set to `True` the potentially
insecure download is allowed to proceed, but an
`~astropy.utils.exceptions.AstropyWarning` is issued. If you are
frequently getting certificate verification warnings, consider
installing or upgrading `certifi`_ package, which provides frequently
updated certificates for common root CAs (i.e., a set similar to those
used by web browsers). If installed, Astropy will use it
automatically.
.. _certifi: https://pypi.org/project/certifi/
Returns
-------
local_path : str
Returns the local path that the file was download to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
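    Examples
    --------
    A sketch of typical usage; the URL is the same illustrative one used
    elsewhere in this module, and the call needs network access, hence the
    skip::
        >>> from astropy.utils.data import download_file
        >>> path = download_file('http://data.astropy.org/allsky/allsky_rosat.fits',
        ...                      cache=True)  # doctest: +SKIP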
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {"User-Agent": conf.default_http_user_agent, "Accept": "*/*"}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
cache = False
missing_cache = (
f"Cache directory cannot be read or created ({e}), "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(
f"Cache value '{cache}' was requested but "
"'update' is the only recognized string; "
"otherwise use a boolean"
)
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (
hasattr(e, "reason")
and hasattr(e.reason, "errno")
and e.reason.errno == 8
):
e.reason.strerror = f"{e.reason.strerror}. requested URL: {remote_url}"
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
"Please include primary URL in sources if you want it to be "
"included as a valid source."
)
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}"
) from errors[sources[0]]
if cache:
try:
return import_file_to_cache(
url_key,
f_name,
remove_original=True,
replace=(cache == "update"),
pkgname=pkgname,
)
except PermissionError as e:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only ({e}), unable to import "
f"downloaded file, providing data in temporary file {f_name} "
"instead."
)
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
def is_url_in_cache(url_key, pkgname="astropy"):
"""Check if a download for ``url_key`` is in the cache.
The provided ``url_key`` will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
Parameters
----------
url_key : str
The URL retrieved
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
in_cache : bool
`True` if a download for ``url_key`` is in the cache, `False` if not
or if the cache does not exist at all.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
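    Examples
    --------
    The answer naturally depends on what has already been downloaded, so the
    value below is only illustrative::
        >>> from astropy.utils.data import is_url_in_cache
        >>> is_url_in_cache('http://data.astropy.org/allsky/allsky_rosat.fits')  # doctest: +SKIP
        False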
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError:
return False
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
return os.path.exists(filename)
def cache_total_size(pkgname="astropy"):
"""Return the total size in bytes of all files in the cache."""
size = 0
dldir = _get_download_cache_loc(pkgname=pkgname)
for root, dirs, files in os.walk(dldir):
size += sum(os.path.getsize(os.path.join(root, name)) for name in files)
return size
def _do_download_files_in_parallel(kwargs):
with astropy.config.paths.set_temp_config(kwargs.pop("temp_config")):
with astropy.config.paths.set_temp_cache(kwargs.pop("temp_cache")):
return download_file(**kwargs)
def download_files_in_parallel(
urls,
cache="update",
show_progress=True,
timeout=None,
sources=None,
multiprocessing_start_method=None,
pkgname="astropy",
):
"""Download multiple files in parallel from the given URLs.
Blocks until all files have downloaded. The result is a list of
local file paths corresponding to the given urls.
The results will be stored in the cache under the values in ``urls`` even
if they are obtained from some other location via ``sources``. See
`~download_file` for details.
Parameters
----------
urls : list of str
The URLs to retrieve.
cache : bool or "update", optional
        Whether to use the cache (default is ``"update"``). If "update",
always download the remote URLs to see if new data is available
and store the result in cache.
.. versionchanged:: 4.0
The default was changed to ``"update"`` and setting it to
``False`` will print a Warning and set it to ``"update"`` again,
because the function will not work properly without cache. Using
``True`` will work as expected.
.. versionchanged:: 3.0
The default was changed to ``True`` and setting it to ``False``
will print a Warning and set it to ``True`` again, because the
function will not work properly without cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`)
timeout : float, optional
Timeout for each individual requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
sources : dict, optional
If provided, for each URL a list of URLs to try to obtain the
file from. The result will be stored under the original URL.
For any URL in this dictionary, the original URL will *not* be
tried unless it is in this list; this is to prevent long waits
for a primary server that is known to be inaccessible at the
moment.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
paths : list of str
The local file paths corresponding to the downloaded URLs.
Notes
-----
If a URL is unreachable, the downloading will grind to a halt and the
exception will propagate upward, but an unpredictable number of
files will have been successfully downloaded and will remain in
the cache.
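Examples
--------
A minimal sketch; the URLs are illustrative only::
>>> from astropy.utils.data import download_files_in_parallel
>>> urls = ['https://example.org/a.fits', 'https://example.org/b.fits']
>>> paths = download_files_in_parallel(urls)  # doctest: +SKIP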
"""
from .console import ProgressBar
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = {}
if not cache:
# See issue #6662: on Windows this won't work because the files are removed
# again before they can be used. On *NIX systems it will behave as if
# cache was set to True because multiprocessing cannot insert the items
# into the list of to-be-removed files. This could be fixed, but really,
# just use the cache, with cache="update" if appropriate.
warn(
"Disabling the cache does not work because of multiprocessing, "
'it will be set to ``"update"``. You may need to manually remove '
"the cached files with clear_download_cache() afterwards.",
AstropyWarning,
)
cache = "update"
if show_progress:
progress = sys.stdout
else:
progress = io.BytesIO()
# Combine duplicate URLs
combined_urls = list(set(urls))
combined_paths = ProgressBar.map(
_do_download_files_in_parallel,
[
dict(
remote_url=u,
cache=cache,
show_progress=False,
timeout=timeout,
sources=sources.get(u, None),
pkgname=pkgname,
temp_cache=astropy.config.paths.set_temp_cache._temp_path,
temp_config=astropy.config.paths.set_temp_config._temp_path,
)
for u in combined_urls
],
file=progress,
multiprocess=True,
multiprocessing_start_method=multiprocessing_start_method,
)
paths = []
for url in urls:
paths.append(combined_paths[combined_urls.index(url)])
return paths
# This is used by download_file and _deltemps to determine the files to delete
# when the interpreter exits
_tempfilestodel = []
@atexit.register
def _deltemps():
global _tempfilestodel
if _tempfilestodel is not None:
while len(_tempfilestodel) > 0:
fn = _tempfilestodel.pop()
if os.path.isfile(fn):
try:
os.remove(fn)
except OSError:
# oh well we tried
# could be held open by some process, on Windows
pass
elif os.path.isdir(fn):
try:
shutil.rmtree(fn)
except OSError:
# couldn't get rid of it, sorry
# could be held open by some process, on Windows
pass
def clear_download_cache(hashorurl=None, pkgname="astropy"):
"""Clears the data file cache by deleting the local file(s).
If a URL is provided, it will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
For the purposes of this function, a file can also be identified by a hash
of its contents or by the filename under which the data is stored (as
returned by `~download_file`, for example).
Parameters
----------
hashorurl : str or None
If None, the whole cache is cleared. Otherwise, specify
a hash for the cached file that is supposed to be deleted,
the full path to a file in the cache that should be deleted,
or a URL that should be removed from the cache if present.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
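Examples
--------
A minimal sketch; the URL is illustrative only. Pass a URL (or hash or
filename) to remove a single entry, or no argument to empty the cache::
>>> from astropy.utils.data import clear_download_cache
>>> clear_download_cache('https://example.org/image.fits')  # doctest: +SKIP
>>> clear_download_cache()  # doctest: +SKIP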
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
# Problem arose when trying to open the cache
# Just a warning, though
msg = "Not clearing data cache - cache inaccessible due to "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return
try:
if hashorurl is None:
# Optional: delete old incompatible caches too
_rmtree(dldir)
elif _is_url(hashorurl):
filepath = os.path.join(dldir, _url_to_dirname(hashorurl))
_rmtree(filepath)
else:
# Not a URL, it should be either a filename or a hash
filepath = os.path.join(dldir, hashorurl)
rp = os.path.relpath(filepath, dldir)
if rp.startswith(".."):
raise RuntimeError(
"attempted to use clear_download_cache on the path "
f"{filepath} outside the data cache directory {dldir}"
)
d, f = os.path.split(rp)
if d and f in ["contents", "url"]:
# It's a filename not the hash of a URL
# so we want to zap the directory containing the
# files "url" and "contents"
filepath = os.path.join(dldir, d)
if os.path.exists(filepath):
_rmtree(filepath)
elif len(hashorurl) == 2 * hashlib.md5().digest_size and re.match(
r"[0-9a-f]+", hashorurl
):
# It's the hash of some file contents, we have to find the right file
filename = _find_hash_fn(hashorurl)
if filename is not None:
clear_download_cache(filename)
except OSError as e:
msg = "Not clearing data from cache - problem arose "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
def _get_download_cache_loc(pkgname="astropy"):
"""Finds the path to the cache directory and makes them if they don't exist.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
datadir : str
The path to the data cache directory.
"""
try:
datadir = os.path.join(
astropy.config.paths.get_cache_dir(pkgname), "download", "url"
)
if not os.path.exists(datadir):
try:
os.makedirs(datadir)
except OSError:
if not os.path.exists(datadir):
raise
elif not os.path.isdir(datadir):
raise OSError(f"Data cache directory {datadir} is not a directory")
return datadir
except OSError as e:
msg = "Remote data cache could not be accessed due to "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
raise
def _url_to_dirname(url):
if not _is_url(url):
raise ValueError(f"Malformed URL: '{url}'")
# Make domain names case-insensitive
# Also makes the http:// case-insensitive
urlobj = list(urllib.parse.urlsplit(url))
urlobj[1] = urlobj[1].lower()
if urlobj[0].lower() in ["http", "https"] and urlobj[1] and urlobj[2] == "":
urlobj[2] = "/"
url_c = urllib.parse.urlunsplit(urlobj)
return hashlib.md5(url_c.encode("utf-8")).hexdigest()
class ReadOnlyDict(dict):
def __setitem__(self, key, value):
raise TypeError("This object is read-only.")
_NOTHING = ReadOnlyDict({})
class CacheDamaged(ValueError):
"""Record the URL or file that was a problem.
Calling clear_download_cache on each entry in the ``.bad_urls`` or
``.bad_files`` attribute should resolve this particular problem.
"""
def __init__(self, *args, bad_urls=None, bad_files=None, **kwargs):
super().__init__(*args, **kwargs)
self.bad_urls = bad_urls if bad_urls is not None else []
self.bad_files = bad_files if bad_files is not None else []
def check_download_cache(pkgname="astropy"):
"""Do a consistency check on the cache.
.. note::
Since v5.0, this function no longer returns anything.
Because the cache is shared by all versions of ``astropy`` in all virtualenvs
run by your user, possibly concurrently, it could accumulate problems.
This could lead to hard-to-debug problems or wasted space. This function
detects a number of incorrect conditions, including nonexistent files that
are indexed, files that are indexed but in the wrong place, and, if you
request it, files whose content does not match the hash that is indexed.
This function also flags files in the cache directory that it does not
recognize; their presence probably indicates that something has gone wrong
and inaccessible files have accumulated in the cache. These can be removed
with :func:`clear_download_cache`, either passing a filename reported in
the raised `~astropy.utils.data.CacheDamaged` exception, or with no
arguments to empty the entire cache and return it to a reasonable, if
empty, state.
Parameters
----------
pkgname : str, optional
The package name to use to locate the download cache, i.e., for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Raises
------
`~astropy.utils.data.CacheDamaged`
To indicate a problem with the cache contents; the exception contains
a ``.bad_files`` attribute containing a set of filenames to allow the
user to use :func:`clear_download_cache` to remove the offending items.
OSError, RuntimeError
To indicate some problem with the cache structure. This may need a full
:func:`clear_download_cache` to resolve, or may indicate some kind of
misconfiguration.
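Examples
--------
A sketch of one way to repair a damaged cache using the information
carried by the exception::
>>> from astropy.utils.data import CacheDamaged, check_download_cache, clear_download_cache
>>> try:  # doctest: +SKIP
...     check_download_cache()
... except CacheDamaged as exc:
...     for bad in exc.bad_files:
...         clear_download_cache(bad)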
"""
bad_files = set()
messages = set()
dldir = _get_download_cache_loc(pkgname=pkgname)
with os.scandir(dldir) as it:
for entry in it:
f = os.path.abspath(os.path.join(dldir, entry.name))
if entry.name.startswith("rmtree-"):
if f not in _tempfilestodel:
bad_files.add(f)
messages.add(f"Cache entry {entry.name} not scheduled for deletion")
elif entry.is_dir():
for sf in os.listdir(f):
if sf in ["url", "contents"]:
continue
sf = os.path.join(f, sf)
bad_files.add(sf)
messages.add(f"Unexpected file f{sf}")
urlf = os.path.join(f, "url")
url = None
if not os.path.isfile(urlf):
bad_files.add(urlf)
messages.add(f"Problem with URL file f{urlf}")
else:
url = get_file_contents(urlf, encoding="utf-8")
if not _is_url(url):
bad_files.add(f)
messages.add(f"Malformed URL: {url}")
else:
hashname = _url_to_dirname(url)
if entry.name != hashname:
bad_files.add(f)
messages.add(
f"URL hashes to {hashname} but is stored in"
f" {entry.name}"
)
if not os.path.isfile(os.path.join(f, "contents")):
bad_files.add(f)
if url is None:
messages.add(f"Hash {entry.name} is missing contents")
else:
messages.add(
f"URL {url} with hash {entry.name} is missing contents"
)
else:
bad_files.add(f)
messages.add(f"Left-over non-directory {f} in cache")
if bad_files:
raise CacheDamaged("\n".join(messages), bad_files=bad_files)
@contextlib.contextmanager
def _SafeTemporaryDirectory(suffix=None, prefix=None, dir=None):
"""Temporary directory context manager
This will not raise an exception if the temporary directory goes away
before it's supposed to be deleted. Specifically, what is deleted will
be the directory *name* produced; if no such directory exists, no
exception will be raised.
It would be safer to delete it only if it's really the same directory
- checked by file descriptor - and if it's still called the same thing.
But that opens a platform-specific can of worms.
It would also be more robust to use ExitStack and TemporaryDirectory,
which is more aggressive about removing readonly things.
"""
d = mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield d
finally:
try:
shutil.rmtree(d)
except OSError:
pass
def _rmtree(path, replace=None):
"""More-atomic rmtree. Ignores missing directory."""
with TemporaryDirectory(
prefix="rmtree-", dir=os.path.dirname(os.path.abspath(path))
) as d:
try:
os.rename(path, os.path.join(d, "to-zap"))
except FileNotFoundError:
pass
except PermissionError:
warn(
CacheMissingWarning(
f"Unable to remove directory {path} because a file in it "
"is in use and you are on Windows",
path,
)
)
raise
if replace is not None:
try:
os.rename(replace, path)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
def import_file_to_cache(
url_key, filename, remove_original=False, pkgname="astropy", *, replace=True
):
"""Import the on-disk file specified by filename to the cache.
The provided ``url_key`` will be the name used in the cache. The file
should contain the contents of this URL, at least notionally (the URL may
be temporarily or permanently unavailable). Users will request these
contents from the cache using ``url_key``. See :func:`download_file` for
details.
If ``url_key`` already exists in the cache, it will be updated to point to
these imported contents, and its old contents will be deleted from the
cache.
Parameters
----------
url_key : str
The key to index the file under. This should probably be
the URL where the file was located, though if you obtained
it from a mirror you should use the URL of the primary
location.
filename : str
The file whose contents you want to import.
remove_original : bool
Whether to remove the original file (``filename``) once import is
complete.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
replace : bool, optional
Whether or not to replace an existing object in the cache, if one exists.
If replacement is not requested but the object exists, silently pass.
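Examples
--------
A minimal sketch; the URL key and local file name are illustrative only::
>>> from astropy.utils.data import import_file_to_cache
>>> import_file_to_cache('https://example.org/image.fits', 'local_copy.fits')  # doctest: +SKIP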
"""
cache_dir = _get_download_cache_loc(pkgname=pkgname)
cache_dirname = _url_to_dirname(url_key)
local_dirname = os.path.join(cache_dir, cache_dirname)
local_filename = os.path.join(local_dirname, "contents")
with _SafeTemporaryDirectory(prefix="temp_dir", dir=cache_dir) as temp_dir:
temp_filename = os.path.join(temp_dir, "contents")
# Make sure we're on the same filesystem
# This will raise an exception if the url_key doesn't turn into a valid filename
shutil.copy(filename, temp_filename)
with open(os.path.join(temp_dir, "url"), "w", encoding="utf-8") as f:
f.write(url_key)
if replace:
_rmtree(local_dirname, replace=temp_dir)
else:
try:
os.rename(temp_dir, local_dirname)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
if remove_original:
os.remove(filename)
return os.path.abspath(local_filename)
def get_cached_urls(pkgname="astropy"):
"""
Get the list of URLs in the cache. Especially useful for looking up what
files are stored in your cache when you don't have internet access.
The listed URLs are the keys programs should use to access the file
contents, but those contents may have actually been obtained from a mirror.
See `~download_file` for details.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
cached_urls : list
List of cached URLs.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
"""
return sorted(cache_contents(pkgname=pkgname).keys())
def cache_contents(pkgname="astropy"):
"""Obtain a dict mapping cached URLs to filenames.
This dictionary is a read-only snapshot of the state of the cache when this
function was called. If other processes are actively working with the
cache, it is possible for them to delete files that are listed in this
dictionary. Use with some caution if you are working on a system that is
busy with many running astropy processes, although the same issues apply to
most functions in this module.
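Examples
--------
A minimal sketch listing everything currently in the cache::
>>> from astropy.utils.data import cache_contents
>>> for url, path in cache_contents().items():  # doctest: +SKIP
...     print(url, '->', path)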
"""
r = {}
try:
dldir = _get_download_cache_loc(pkgname=pkgname)
except OSError:
return _NOTHING
with os.scandir(dldir) as it:
for entry in it:
if entry.is_dir():
url = get_file_contents(
os.path.join(dldir, entry.name, "url"), encoding="utf-8"
)
r[url] = os.path.abspath(os.path.join(dldir, entry.name, "contents"))
return ReadOnlyDict(r)
def export_download_cache(
filename_or_obj, urls=None, overwrite=False, pkgname="astropy"
):
"""Exports the cache contents as a ZIP file.
Parameters
----------
filename_or_obj : str or file-like
Where to put the created ZIP file. Must be something the zipfile
module can write to.
urls : iterable of str or None
The URLs to include in the exported cache. The default is all
URLs currently in the cache. If a URL is included in this list
but is not currently in the cache, a KeyError will be raised.
To ensure that all are in the cache use `~download_file`
or `~download_files_in_parallel`.
overwrite : bool, optional
If filename_or_obj is a filename that exists, it will only be
overwritten if this is True.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
import_download_cache : import the contents of such a ZIP file
import_file_to_cache : import a single file directly
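Examples
--------
A minimal round-trip sketch; the ZIP file name is illustrative only::
>>> from astropy.utils.data import export_download_cache, import_download_cache
>>> export_download_cache('astropy_cache.zip')  # doctest: +SKIP
>>> import_download_cache('astropy_cache.zip', update_cache=True)  # doctest: +SKIP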
"""
if urls is None:
urls = get_cached_urls(pkgname)
with zipfile.ZipFile(filename_or_obj, "w" if overwrite else "x") as z:
for u in urls:
fn = download_file(u, cache=True, sources=[], pkgname=pkgname)
# Do not use os.path.join because ZIP files want
# "/" on all platforms
z_fn = urllib.parse.quote(u, safe="")
z.write(fn, z_fn)
def import_download_cache(
filename_or_obj, urls=None, update_cache=False, pkgname="astropy"
):
"""Imports the contents of a ZIP file into the cache.
Each member of the ZIP file should be named by a quoted version of the
URL whose contents it stores. These names are decoded with
:func:`~urllib.parse.unquote`.
Parameters
----------
filename_or_obj : str or file-like
Where the stored ZIP file is. Must be something the :mod:`~zipfile`
module can read from.
urls : set of str or list of str or None
The URLs to import from the ZIP file. The default is all
URLs in the file.
update_cache : bool, optional
If True, any entry in the ZIP file will overwrite the value in the
cache; if False, leave untouched any entry already in the cache.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
export_download_cache : export the contents of the cache to such a ZIP file
import_file_to_cache : import a single file directly
"""
with zipfile.ZipFile(filename_or_obj, "r") as z, TemporaryDirectory() as d:
for i, zf in enumerate(z.infolist()):
url = urllib.parse.unquote(zf.filename)
# FIXME(aarchiba): do we want some kind of validation on this URL?
# urllib.parse might do something sensible...but what URLs might
# they have?
# is_url in this file is probably a good check, not just here
# but throughout this file.
if urls is not None and url not in urls:
continue
if not update_cache and is_url_in_cache(url, pkgname=pkgname):
continue
f_temp_name = os.path.join(d, str(i))
with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
block = f_zip.read(conf.download_block_size)
while block:
f_temp.write(block)
block = f_zip.read(conf.download_block_size)
import_file_to_cache(
url, f_temp_name, remove_original=True, pkgname=pkgname
)
|
aa23a78c03598f86bf2c8467c3424d3d9fb5f9f0aed3e89cc0e9b22b7ba6aa5f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import numpy as np
import astropy.units as u
from astropy.coordinates import ITRS, CartesianRepresentation, SphericalRepresentation
from astropy.utils import unbroadcast
from .wcs import WCS, WCSSUB_LATITUDE, WCSSUB_LONGITUDE
__doctest_skip__ = ["wcs_to_celestial_frame", "celestial_frame_to_wcs"]
__all__ = [
"obsgeo_to_frame",
"add_stokes_axis_to_wcs",
"celestial_frame_to_wcs",
"wcs_to_celestial_frame",
"proj_plane_pixel_scales",
"proj_plane_pixel_area",
"is_proj_plane_distorted",
"non_celestial_pixel_scales",
"skycoord_to_pixel",
"pixel_to_skycoord",
"custom_wcs_to_frame_mappings",
"custom_frame_to_wcs_mappings",
"pixel_to_pixel",
"local_partial_pixel_derivatives",
"fit_wcs_from_points",
]
def add_stokes_axis_to_wcs(wcs, add_before_ind):
"""
Add a new Stokes axis that is uncorrelated with any other axes.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to add to
add_before_ind : int
Index of the WCS to insert the new Stokes axis in front of.
To add at the end, set ``add_before_ind = wcs.wcs.naxis``.
The beginning is at position 0.
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with an additional axis
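Examples
--------
A minimal sketch appending a Stokes axis to a two-dimensional WCS::
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import add_stokes_axis_to_wcs
>>> w = WCS(naxis=2)
>>> add_stokes_axis_to_wcs(w, w.wcs.naxis).wcs.naxis
3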
"""
inds = [i + 1 for i in range(wcs.wcs.naxis)]
inds.insert(add_before_ind, 0)
newwcs = wcs.sub(inds)
newwcs.wcs.ctype[add_before_ind] = "STOKES"
newwcs.wcs.cname[add_before_ind] = "STOKES"
return newwcs
def _wcs_to_celestial_frame_builtin(wcs):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import (
FK4,
FK5,
ICRS,
ITRS,
FK4NoETerms,
Galactic,
SphericalRepresentation,
)
# Import astropy.time here otherwise setup.py fails before extensions are compiled
from astropy.time import Time
if wcs.wcs.lng == -1 or wcs.wcs.lat == -1:
return None
radesys = wcs.wcs.radesys
if np.isnan(wcs.wcs.equinox):
equinox = None
else:
equinox = wcs.wcs.equinox
xcoord = wcs.wcs.ctype[wcs.wcs.lng][:4]
ycoord = wcs.wcs.ctype[wcs.wcs.lat][:4]
# Apply logic from FITS standard to determine the default radesys
if radesys == "" and xcoord == "RA--" and ycoord == "DEC-":
if equinox is None:
radesys = "ICRS"
elif equinox < 1984.0:
radesys = "FK4"
else:
radesys = "FK5"
if radesys == "FK4":
if equinox is not None:
equinox = Time(equinox, format="byear")
frame = FK4(equinox=equinox)
elif radesys == "FK4-NO-E":
if equinox is not None:
equinox = Time(equinox, format="byear")
frame = FK4NoETerms(equinox=equinox)
elif radesys == "FK5":
if equinox is not None:
equinox = Time(equinox, format="jyear")
frame = FK5(equinox=equinox)
elif radesys == "ICRS":
frame = ICRS()
else:
if xcoord == "GLON" and ycoord == "GLAT":
frame = Galactic()
elif xcoord == "TLON" and ycoord == "TLAT":
# The default representation for ITRS is cartesian, but for WCS
# purposes, we need the spherical representation.
frame = ITRS(
representation_type=SphericalRepresentation,
obstime=wcs.wcs.dateobs or None,
)
else:
frame = None
return frame
def _celestial_frame_to_wcs_builtin(frame, projection="TAN"):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import (
FK4,
FK5,
ICRS,
ITRS,
BaseRADecFrame,
FK4NoETerms,
Galactic,
)
# Create a 2-dimensional WCS
wcs = WCS(naxis=2)
if isinstance(frame, BaseRADecFrame):
xcoord = "RA--"
ycoord = "DEC-"
if isinstance(frame, ICRS):
wcs.wcs.radesys = "ICRS"
elif isinstance(frame, FK4NoETerms):
wcs.wcs.radesys = "FK4-NO-E"
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK4):
wcs.wcs.radesys = "FK4"
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK5):
wcs.wcs.radesys = "FK5"
wcs.wcs.equinox = frame.equinox.jyear
else:
return None
elif isinstance(frame, Galactic):
xcoord = "GLON"
ycoord = "GLAT"
elif isinstance(frame, ITRS):
xcoord = "TLON"
ycoord = "TLAT"
wcs.wcs.radesys = "ITRS"
wcs.wcs.dateobs = frame.obstime.utc.isot
else:
return None
wcs.wcs.ctype = [xcoord + "-" + projection, ycoord + "-" + projection]
return wcs
WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]]
FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]]
class custom_wcs_to_frame_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, "__call__"):
mappings = [mappings]
WCS_FRAME_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
WCS_FRAME_MAPPINGS.pop()
# Backward-compatibility
custom_frame_mappings = custom_wcs_to_frame_mappings
class custom_frame_to_wcs_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, "__call__"):
mappings = [mappings]
FRAME_WCS_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
FRAME_WCS_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
subclass that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
"""
for mapping_set in WCS_FRAME_MAPPINGS:
for func in mapping_set:
frame = func(wcs)
if frame is not None:
return frame
raise ValueError(
"Could not determine celestial frame corresponding to the specified WCS object"
)
def celestial_frame_to_wcs(frame, projection="TAN"):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
subclass for which to find the WCS.
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
for mapping_set in FRAME_WCS_MAPPINGS:
for func in mapping_set:
wcs = func(frame, projection=projection)
if wcs is not None:
return wcs
raise ValueError(
"Could not determine WCS corresponding to the specified coordinate frame."
)
def proj_plane_pixel_scales(wcs):
"""
For a WCS returns pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the scales corresponding to celestial axes only,
make sure that the input `~astropy.wcs.WCS` object contains
celestial axes only, e.g., by passing in the
`~astropy.wcs.WCS.celestial` WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
scale : ndarray
A vector (`~numpy.ndarray`) of projection plane increments
corresponding to each pixel side (axis). The units of the returned
results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
`~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for
the celestial WCS and can be obtained by inquiring the value
of `~astropy.wcs.Wcsprm.cunit` property of the input
`~astropy.wcs.WCS` WCS object.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
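Examples
--------
A minimal sketch with a purely diagonal pixel scale matrix::
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import proj_plane_pixel_scales
>>> w = WCS(naxis=2)
>>> w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> w.wcs.cdelt = [-0.1, 0.1]
>>> scales = proj_plane_pixel_scales(w)  # [0.1, 0.1], in the units of cdelt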
"""
return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float))
def proj_plane_pixel_area(wcs):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the area of pixels corresponding to celestial
axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
object of the input ``wcs``. This is different from the
`~astropy.wcs.utils.proj_plane_pixel_scales` function
that computes the scales for the axes of the input WCS itself.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
area : float
Area (in the projection plane) of the pixel at ``CRPIX`` location.
The units of the returned result are the same as the units of
the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`,
and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be
obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit`
property of the `~astropy.wcs.WCS.celestial` WCS object.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
"""
psm = wcs.celestial.pixel_scale_matrix
if psm.shape != (2, 2):
raise ValueError("Pixel area is defined only for 2D pixels.")
return np.abs(np.linalg.det(psm))
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
r"""
For a WCS returns `False` if square image (detector) pixels stay square
when projected onto the "plane of intermediate world coordinates"
as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
It will return `True` if transformation from image (detector) coordinates
to the focal plane coordinates is non-orthogonal or if WCS contains
non-linear (e.g., SIP) distortions.
.. note::
Since this function is concerned **only** about the transformation
"image plane"->"focal plane" and **not** about the transformation
"celestial sphere"->"focal plane"->"image plane",
this function ignores distortions arising due to non-linear nature
of most projections.
Let's denote by *C* either the original or the reconstructed
(from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted`
verifies that the transformation from image (detector) coordinates
to the focal plane coordinates is orthogonal using the following
check:
.. math::
\left \| \frac{C \cdot C^{\mathrm{T}}}
{| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .
Parameters
----------
wcs : `~astropy.wcs.WCS`
World coordinate system object
maxerr : float, optional
Accuracy to which the CD matrix, **normalized** such
that :math:`|det(CD)|=1`, should be close to being an
orthogonal matrix as described in the above equation
(see :math:`\epsilon`).
Returns
-------
distorted : bool
Returns `True` if focal (projection) plane is distorted and `False`
otherwise.
"""
cwcs = wcs.celestial
return not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or _has_distortion(cwcs) # fmt: skip
def _is_cd_orthogonal(cd, maxerr):
shape = cd.shape
if not (len(shape) == 2 and shape[0] == shape[1]):
raise ValueError("CD (or PC) matrix must be a 2D square matrix.")
pixarea = np.abs(np.linalg.det(cd))
if pixarea == 0.0:
raise ValueError("CD (or PC) matrix is singular.")
# NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
# However, I am not aware of complex CD/PC matrices...
I = np.dot(cd, cd.T) / pixarea
cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))
return cd_unitary_err < maxerr
def non_celestial_pixel_scales(inwcs):
"""
Calculate the pixel scale along each axis of a non-celestial WCS,
for example one with mixed spectral and spatial axes.
Parameters
----------
inwcs : `~astropy.wcs.WCS`
The world coordinate system object.
Returns
-------
scale : `numpy.ndarray`
The pixel scale along each axis.
"""
if inwcs.is_celestial:
raise ValueError("WCS is celestial, use celestial_pixel_scales instead")
pccd = inwcs.pixel_scale_matrix
if np.allclose(np.extract(1 - np.eye(*pccd.shape), pccd), 0):
return np.abs(np.diagonal(pccd)) * u.deg
else:
raise ValueError("WCS is rotated, cannot determine consistent pixel scales")
def _has_distortion(wcs):
"""
`True` if the WCS contains any SIP or image distortion components.
"""
return any(
getattr(wcs, dist_attr) is not None
for dist_attr in ["cpdis1", "cpdis2", "det2im1", "det2im2", "sip"]
)
# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.
def skycoord_to_pixel(coords, wcs, origin=0, mode="all"):
"""
Convert a set of SkyCoord coordinates into pixels.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
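Examples
--------
A minimal sketch with a hand-built ICRS WCS; the reference values are
arbitrary::
>>> from astropy.coordinates import SkyCoord
>>> from astropy.wcs import WCS
>>> from astropy.wcs.utils import skycoord_to_pixel
>>> w = WCS(naxis=2)
>>> w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> w.wcs.crval = [30.0, 45.0]
>>> w.wcs.crpix = [1.0, 1.0]
>>> w.wcs.cdelt = [-0.1, 0.1]
>>> w.wcs.cunit = ['deg', 'deg']
>>> xp, yp = skycoord_to_pixel(SkyCoord(30.0, 45.0, unit='deg'), w)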
"""
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS needs
xw_unit = u.Unit(wcs.wcs.cunit[0])
yw_unit = u.Unit(wcs.wcs.cunit[1])
# Convert positions to frame
coords = coords.transform_to(frame)
# Extract longitude and latitude. We first try and use lon/lat directly,
# but if the representation is not spherical or unit spherical this will
# fail. We should then force the use of the unit spherical
# representation. We don't do that directly to make sure that we preserve
# custom lon/lat representations if available.
try:
lon = coords.data.lon.to(xw_unit)
lat = coords.data.lat.to(yw_unit)
except AttributeError:
lon = coords.spherical.lon.to(xw_unit)
lat = coords.spherical.lat.to(yw_unit)
# Convert to pixel coordinates
if mode == "all":
xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
elif mode == "wcs":
xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
return xp, yp
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode="all", cls=None):
"""
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
including only the core WCS transformation (``'wcs'``).
cls : class or None
The class of object to create. Should be a
`~astropy.coordinates.SkyCoord` subclass. If None, defaults to
`~astropy.coordinates.SkyCoord`.
Returns
-------
coords : `~astropy.coordinates.SkyCoord` subclass
The celestial coordinates, as an instance of ``cls``.
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord, UnitSphericalRepresentation
# we have to do this instead of actually setting the default to SkyCoord
# because importing SkyCoord at the module-level leads to circular
# dependencies.
if cls is None:
cls = SkyCoord
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS gives
lon_unit = u.Unit(wcs.wcs.cunit[0])
lat_unit = u.Unit(wcs.wcs.cunit[1])
# Convert pixel coordinates to celestial coordinates
if mode == "all":
lon, lat = wcs.all_pix2world(xp, yp, origin)
elif mode == "wcs":
lon, lat = wcs.wcs_pix2world(xp, yp, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
# Add units to longitude/latitude
lon = lon * lon_unit
lat = lat * lat_unit
# Create a SkyCoord-like object
data = UnitSphericalRepresentation(lon=lon, lat=lat)
coords = cls(frame.realize_frame(data))
return coords
def _unique_with_order_preserved(items):
"""
Return a list of unique items in the list provided, preserving the order
in which they are found.
"""
new_items = []
for item in items:
if item not in new_items:
new_items.append(item)
return new_items
def _pixel_to_world_correlation_matrix(wcs):
"""
Return a correlation matrix between the pixel coordinates and the
high level world coordinates, along with the list of high level world
coordinate classes.
The shape of the matrix is ``(n_world, n_pix)``, where ``n_world`` is the
number of high level world coordinates.
"""
# We basically want to collapse the world dimensions together that are
# combined into the same high-level objects.
# Get the following in advance as getting these properties can be expensive
all_components = wcs.low_level_wcs.world_axis_object_components
all_classes = wcs.low_level_wcs.world_axis_object_classes
axis_correlation_matrix = wcs.low_level_wcs.axis_correlation_matrix
components = _unique_with_order_preserved([c[0] for c in all_components])
matrix = np.zeros((len(components), wcs.pixel_n_dim), dtype=bool)
for iworld in range(wcs.world_n_dim):
iworld_unique = components.index(all_components[iworld][0])
matrix[iworld_unique] |= axis_correlation_matrix[iworld]
classes = [all_classes[component][0] for component in components]
return matrix, classes
def _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out):
"""
Correlation matrix between the input and output pixel coordinates for a
pixel -> world -> pixel transformation specified by two WCS instances.
The first WCS specified is the one used for the pixel -> world
transformation and the second WCS specified is the one used for the world ->
pixel transformation. The shape of the matrix is
``(n_pixel_out, n_pixel_in)``.
"""
matrix1, classes1 = _pixel_to_world_correlation_matrix(wcs_in)
matrix2, classes2 = _pixel_to_world_correlation_matrix(wcs_out)
if len(classes1) != len(classes2):
raise ValueError("The two WCS return a different number of world coordinates")
# Check if classes match uniquely
unique_match = True
mapping = []
for class1 in classes1:
matches = classes2.count(class1)
if matches == 0:
raise ValueError("The world coordinate types of the two WCS do not match")
elif matches > 1:
unique_match = False
break
else:
mapping.append(classes2.index(class1))
if unique_match:
# Classes are unique, so we need to re-order matrix2 along the world
# axis using the mapping we found above.
matrix2 = matrix2[mapping]
elif classes1 != classes2:
raise ValueError(
"World coordinate order doesn't match and automatic matching is ambiguous"
)
matrix = np.matmul(matrix2.T, matrix1)
return matrix
def _split_matrix(matrix):
"""
Given an axis correlation matrix from a WCS object, return information about
the individual WCS that can be split out.
The output is a list of tuples, where each tuple contains a list of
pixel dimensions and a list of world dimensions that can be extracted to
form a new WCS. For example, in the case of a spectral cube with the first
two world coordinates being the celestial coordinates and the third
coordinate being an uncorrelated spectral axis, the matrix would look like::
array([[ True, True, False],
[ True, True, False],
[False, False, True]])
and this function will return ``[([0, 1], [0, 1]), ([2], [2])]``.
"""
pixel_used = []
split_info = []
for ipix in range(matrix.shape[1]):
if ipix in pixel_used:
continue
pixel_include = np.zeros(matrix.shape[1], dtype=bool)
pixel_include[ipix] = True
n_pix_prev, n_pix = 0, 1
while n_pix > n_pix_prev:
world_include = matrix[:, pixel_include].any(axis=1)
pixel_include = matrix[world_include, :].any(axis=0)
n_pix_prev, n_pix = n_pix, np.sum(pixel_include)
pixel_indices = list(np.nonzero(pixel_include)[0])
world_indices = list(np.nonzero(world_include)[0])
pixel_used.extend(pixel_indices)
split_info.append((pixel_indices, world_indices))
return split_info
def pixel_to_pixel(wcs_in, wcs_out, *inputs):
"""
Transform pixel coordinates in a dataset with a WCS to pixel coordinates
in another dataset with a different WCS.
This function is designed to efficiently deal with input pixel arrays that
are broadcasted views of smaller arrays, and is compatible with any
APE14-compliant WCS.
Parameters
----------
wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the original dataset which complies with the
high-level shared APE 14 WCS API.
wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the target dataset which complies with the
high-level shared APE 14 WCS API.
*inputs :
Scalars or arrays giving the pixel coordinates to transform.
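Examples
--------
A minimal sketch; ``wcs_a`` and ``wcs_b`` stand in for two APE 14-compliant
WCS objects describing overlapping data::
>>> from astropy.wcs.utils import pixel_to_pixel
>>> x_b, y_b = pixel_to_pixel(wcs_a, wcs_b, 1.5, 2.5)  # doctest: +SKIP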
"""
# Shortcut for scalars
if np.isscalar(inputs[0]):
world_outputs = wcs_in.pixel_to_world(*inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
return wcs_out.world_to_pixel(*world_outputs)
# Remember original shape
original_shape = inputs[0].shape
matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
split_info = _split_matrix(matrix)
outputs = [None] * wcs_out.pixel_n_dim
for pixel_in_indices, pixel_out_indices in split_info:
pixel_inputs = []
for ipix in range(wcs_in.pixel_n_dim):
if ipix in pixel_in_indices:
pixel_inputs.append(unbroadcast(inputs[ipix]))
else:
pixel_inputs.append(inputs[ipix].flat[0])
pixel_inputs = np.broadcast_arrays(*pixel_inputs)
world_outputs = wcs_in.pixel_to_world(*pixel_inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
pixel_outputs = wcs_out.world_to_pixel(*world_outputs)
if wcs_out.pixel_n_dim == 1:
pixel_outputs = (pixel_outputs,)
for ipix in range(wcs_out.pixel_n_dim):
if ipix in pixel_out_indices:
outputs[ipix] = np.broadcast_to(pixel_outputs[ipix], original_shape)
return outputs[0] if wcs_out.pixel_n_dim == 1 else outputs
def local_partial_pixel_derivatives(wcs, *pixel, normalize_by_world=False):
"""
Return a matrix of shape ``(world_n_dim, pixel_n_dim)`` where each entry
``[i, j]`` is the partial derivative d(world_i)/d(pixel_j) at the requested
pixel position.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS transformation to evaluate the derivatives for.
*pixel : float
The scalar pixel coordinates at which to evaluate the derivatives.
normalize_by_world : bool
If `True`, the matrix is normalized so that for each world entry
the derivatives add up to 1.
"""
# Find the world coordinates at the requested pixel
pixel_ref = np.array(pixel)
world_ref = np.array(wcs.pixel_to_world_values(*pixel_ref))
# Set up the derivative matrix
derivatives = np.zeros((wcs.world_n_dim, wcs.pixel_n_dim))
for i in range(wcs.pixel_n_dim):
pixel_off = pixel_ref.copy()
pixel_off[i] += 1
world_off = np.array(wcs.pixel_to_world_values(*pixel_off))
derivatives[:, i] = world_off - world_ref
if normalize_by_world:
derivatives /= derivatives.sum(axis=0)[:, np.newaxis]
return derivatives
def _linear_wcs_fit(params, lon, lat, x, y, w_obj):
"""
Objective function for fitting linear terms.
Parameters
----------
params : array
6 element array. First 4 elements are the flattened CD matrix, last 2 are CRPIX.
lon, lat: array
Sky coordinates.
x, y: array
Pixel coordinates
w_obj: `~astropy.wcs.WCS`
WCS object
"""
cd = params[0:4]
crpix = params[4:6]
w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))
w_obj.wcs.crpix = crpix
lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)
lat_resids = lat - lat2
lon_resids = lon - lon2
# In case the longitude has wrapped around
lon_resids = np.mod(lon_resids - 180.0, 360.0) - 180.0
resids = np.concatenate((lon_resids * np.cos(np.radians(lat)), lat_resids))
return resids
def _sip_fit(params, lon, lat, u, v, w_obj, order, coeff_names):
"""Objective function for fitting SIP.
Parameters
----------
params : array
Fittable parameters. First 2 elements are CRPIX, the next 4 are the flattened
CD matrix, and the remainder are the SIP coefficients.
lon, lat: array
Sky coordinates.
u, v: array
Pixel coordinates
w_obj: `~astropy.wcs.WCS`
WCS object
"""
from astropy.modeling.models import SIP # here to avoid circular import
# unpack params
crpix = params[0:2]
cdx = params[2:6].reshape((2, 2))
a_params = params[6 : 6 + len(coeff_names)]
b_params = params[6 + len(coeff_names) :]
# assign to wcs, used for transformations in this function
w_obj.wcs.cd = cdx
w_obj.wcs.crpix = crpix
a_coeff, b_coeff = {}, {}
for i in range(len(coeff_names)):
a_coeff["A_" + coeff_names[i]] = a_params[i]
b_coeff["B_" + coeff_names[i]] = b_params[i]
sip = SIP(
crpix=crpix, a_order=order, b_order=order, a_coeff=a_coeff, b_coeff=b_coeff
)
fuv, guv = sip(u, v)
xo, yo = np.dot(cdx, np.array([u + fuv - crpix[0], v + guv - crpix[1]]))
# use all_world2pix in case `projection` contains a distortion table
x, y = w_obj.all_world2pix(lon, lat, 0)
x, y = np.dot(w_obj.wcs.cd, (x - w_obj.wcs.crpix[0], y - w_obj.wcs.crpix[1]))
resids = np.concatenate((x - xo, y - yo))
return resids
def fit_wcs_from_points(
xy, world_coords, proj_point="center", projection="TAN", sip_degree=None
):
"""
Given two matching sets of coordinates on detector and sky,
compute the WCS.
Fits a WCS object to matched set of input detector and sky coordinates.
Optionally, a SIP can be fit to account for geometric
distortion. Returns an `~astropy.wcs.WCS` object with the best fit
parameters for mapping between input pixel and sky coordinates.
The projection type (default 'TAN') can be passed in as a string, one of
the valid three-letter projection codes - or as a WCS object with
projection keywords already set. Note that if an input WCS has any
non-polynomial distortion, this will be applied and reflected in the
fit terms and coefficients. Passing in a WCS object in this way essentially
allows it to be refit based on the matched input coordinates and projection
point, but take care when using this option as non-projection related
keywords in the input might cause unexpected behavior.
Notes
-----
- The fiducial point for the spherical projection can be set to 'center'
to use the mean position of input sky coordinates, or as an
`~astropy.coordinates.SkyCoord` object.
- Units in all output WCS objects will always be in degrees.
- If the coordinate frame differs between `~astropy.coordinates.SkyCoord`
objects passed in for ``world_coords`` and ``proj_point``, the frame for
``world_coords`` will be used as the frame for the output WCS.
- If a WCS object is passed in to ``projection`` the CD/PC matrix will
be used as an initial guess for the fit. If this is known to be
significantly off and may throw off the fit, set to the identity matrix
(for example, by doing ``wcs.wcs.pc = [(1., 0.), (0., 1.)]``).
Parameters
----------
xy : (`numpy.ndarray`, `numpy.ndarray`) tuple
x & y pixel coordinates.
world_coords : `~astropy.coordinates.SkyCoord`
SkyCoord object with world coordinates.
proj_point : 'center' or `~astropy.coordinates.SkyCoord`
Defaults to 'center', in which case the geometric center of input world
coordinates will be used as the projection point. To specify an exact
point for the projection, a SkyCoord object with a coordinate pair can
be passed in. For consistency, the units and frame of these coordinates
will be transformed to match ``world_coords`` if they do not already match.
projection : str or `~astropy.wcs.WCS`
Three letter projection code, any of the standard projections defined
in the FITS WCS standard. Optionally, a WCS object with projection
keywords set may be passed in.
sip_degree : None or int
If set to a non-zero integer value, will fit SIP of degree
``sip_degree`` to model geometric distortion. Defaults to None, meaning
no distortion corrections will be fit.
Returns
-------
wcs : `~astropy.wcs.WCS`
The best-fit WCS to the points given.
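Examples
--------
A sketch of the expected call pattern; ``x``, ``y`` and ``coords`` stand in
for matched pixel positions and a `~astropy.coordinates.SkyCoord` of the
corresponding sky positions::
>>> from astropy.wcs.utils import fit_wcs_from_points
>>> wcs = fit_wcs_from_points((x, y), coords, sip_degree=3)  # doctest: +SKIP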
"""
from scipy.optimize import least_squares
import astropy.units as u
from astropy.coordinates import SkyCoord # here to avoid circular import
from .wcs import Sip
xp, yp = xy
try:
lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
except AttributeError:
unit_sph = world_coords.unit_spherical
lon, lat = unit_sph.lon.deg, unit_sph.lat.deg
# verify input
if (type(proj_point) != type(world_coords)) and (proj_point != "center"):
raise ValueError(
"proj_point must be set to 'center', or an"
+ "`~astropy.coordinates.SkyCoord` object with "
+ "a pair of points."
)
use_center_as_proj_point = str(proj_point) == "center"
if not use_center_as_proj_point:
assert proj_point.size == 1
proj_codes = [
"AZP",
"SZP",
"TAN",
"STG",
"SIN",
"ARC",
"ZEA",
"AIR",
"CYP",
"CEA",
"CAR",
"MER",
"SFL",
"PAR",
"MOL",
"AIT",
"COP",
"COE",
"COD",
"COO",
"BON",
"PCO",
"TSC",
"CSC",
"QSC",
"HPX",
"XPH",
]
if type(projection) == str:
if projection not in proj_codes:
raise ValueError(
"Must specify valid projection code from list of "
"supported types: " + ", ".join(proj_codes)
)
# empty wcs to fill in with fit values
wcs = celestial_frame_to_wcs(frame=world_coords.frame, projection=projection)
else: # if projection is not string, should be wcs object. use as template.
wcs = copy.deepcopy(projection)
wcs.wcs.cdelt = (1.0, 1.0)  # make sure cdelt is 1
wcs.sip = None
# Change PC to CD, since cdelt will be set to 1
if wcs.wcs.has_pc():
wcs.wcs.cd = wcs.wcs.pc
wcs.wcs.__delattr__("pc")
if (type(sip_degree) != type(None)) and (type(sip_degree) != int):
raise ValueError("sip_degree must be None, or integer.")
# compute bounding box for sources in image coordinates:
xpmin, xpmax, ypmin, ypmax = xp.min(), xp.max(), yp.min(), yp.max()
# set pixel_shape to span of input points
wcs.pixel_shape = (
1 if xpmax <= 0.0 else int(np.ceil(xpmax)),
1 if ypmax <= 0.0 else int(np.ceil(ypmax)),
)
# determine CRVAL from input
close = lambda l, p: p[np.argmin(np.abs(l))]
if use_center_as_proj_point: # use center of input points
sc1 = SkyCoord(lon.min() * u.deg, lat.max() * u.deg)
sc2 = SkyCoord(lon.max() * u.deg, lat.min() * u.deg)
pa = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
midpoint_sc = sc1.directional_offset_by(pa, sep / 2)
wcs.wcs.crval = (midpoint_sc.data.lon.deg, midpoint_sc.data.lat.deg)
wcs.wcs.crpix = ((xpmax + xpmin) / 2.0, (ypmax + ypmin) / 2.0)
else: # convert units, initial guess for crpix
proj_point = proj_point.transform_to(world_coords)
wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
wcs.wcs.crpix = (
close(lon - wcs.wcs.crval[0], xp + 1),
close(lat - wcs.wcs.crval[1], yp + 1),
)
# fit linear terms, assign to wcs
# use (1, 0, 0, 1) as initial guess, in case input wcs was passed in
# and cd terms are way off.
# Use bounds to require that the fit center pixel is on the input image
if xpmin == xpmax:
xpmin, xpmax = xpmin - 0.5, xpmax + 0.5
if ypmin == ypmax:
ypmin, ypmax = ypmin - 0.5, ypmax + 0.5
p0 = np.concatenate([wcs.wcs.cd.flatten(), wcs.wcs.crpix.flatten()])
fit = least_squares(
_linear_wcs_fit,
p0,
args=(lon, lat, xp, yp, wcs),
bounds=[
[-np.inf, -np.inf, -np.inf, -np.inf, xpmin + 1, ypmin + 1],
[np.inf, np.inf, np.inf, np.inf, xpmax + 1, ypmax + 1],
],
)
wcs.wcs.crpix = np.array(fit.x[4:6])
wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))
# fit SIP, if specified. Only fit forward coefficients
if sip_degree:
degree = sip_degree
if "-SIP" not in wcs.wcs.ctype[0]:
wcs.wcs.ctype = [x + "-SIP" for x in wcs.wcs.ctype]
coef_names = [
f"{i}_{j}"
for i in range(degree + 1)
for j in range(degree + 1)
if (i + j) < (degree + 1) and (i + j) > 1
]
p0 = np.concatenate(
(
np.array(wcs.wcs.crpix),
wcs.wcs.cd.flatten(),
np.zeros(2 * len(coef_names)),
)
)
fit = least_squares(
_sip_fit,
p0,
args=(lon, lat, xp, yp, wcs, degree, coef_names),
bounds=[
[xpmin + 1, ypmin + 1] + [-np.inf] * (4 + 2 * len(coef_names)),
[xpmax + 1, ypmax + 1] + [np.inf] * (4 + 2 * len(coef_names)),
],
)
coef_fit = (
list(fit.x[6 : 6 + len(coef_names)]),
list(fit.x[6 + len(coef_names) :]),
)
# put fit values in wcs
wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
wcs.wcs.crpix = fit.x[0:2]
a_vals = np.zeros((degree + 1, degree + 1))
b_vals = np.zeros((degree + 1, degree + 1))
for coef_name in coef_names:
a_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[0].pop(0)
b_vals[int(coef_name[0])][int(coef_name[2])] = coef_fit[1].pop(0)
wcs.sip = Sip(
a_vals,
b_vals,
np.zeros((degree + 1, degree + 1)),
np.zeros((degree + 1, degree + 1)),
wcs.wcs.crpix,
)
return wcs
def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an `~.builtin_frames.ITRS` coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
The time associated with the coordinate, will be passed to
`~.builtin_frames.ITRS` as the obstime keyword.
Returns
-------
`~.builtin_frames.ITRS`
An `~.builtin_frames.ITRS` coordinate frame
representing the coordinates.
Notes
-----
The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
where the first three elements are the coordinate in a cartesian
representation and the last three are the coordinate in a spherical
representation.
This function prioritizes reading the cartesian coordinates, and will only
read the spherical coordinates if the cartesian coordinates are either all
zero or any of the cartesian coordinates are non-finite.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
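Examples
--------
A minimal sketch; ``w`` stands in for a `~astropy.wcs.WCS` whose header
carried ``OBSGEO-*`` keywords and a ``DATE-OBS`` value::
>>> from astropy.wcs.utils import obsgeo_to_frame
>>> observer = obsgeo_to_frame(w.wcs.obsgeo, w.wcs.dateobs)  # doctest: +SKIP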
"""
if (
obsgeo is None
or len(obsgeo) != 6
or np.all(np.array(obsgeo) == 0)
or np.all(~np.isfinite(obsgeo))
):
raise ValueError(
f"Can not parse the 'obsgeo' location ({obsgeo}). "
"obsgeo should be a length 6 non-zero, finite numpy array"
)
# If the cartesian coords are zero or have NaNs in them use the spherical ones
if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
# Otherwise we assume the cartesian ones are valid
else:
data = CartesianRepresentation(*obsgeo[:3] * u.m)
return ITRS(data, obstime=obstime)
|
fe927ab18385261d020a2b56c16a501a78608bbd86685ad1d92737256376b60d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Under the hood, there are 3 separate classes that perform different
# parts of the transformation:
#
# - `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS
# functionality in `wcslib`_. (This includes TPV and TPD
# polynomial distortion, but not SIP distortion).
#
# - `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the
# `SIP`_ convention.
#
# - `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_
# lookup tables.
#
# Additionally, the class `WCS` aggregates all of these transformations
# together in a pipeline:
#
# - Detector to image plane correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip`
# object)
#
# - `distortion paper`_ table-lookup correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object)
# STDLIB
import builtins
import copy
import io
import itertools
import os
import re
import textwrap
import uuid
import warnings
# THIRD-PARTY
import numpy as np
from packaging.version import Version
# LOCAL
from astropy import log
from astropy import units as u
from astropy.io import fits
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import (
AstropyDeprecationWarning,
AstropyUserWarning,
AstropyWarning,
)
from . import _wcs, docstrings
# Mix-in class that provides the APE 14 API
from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS
__all__ = [
"FITSFixedWarning",
"WCS",
"find_all_wcs",
"DistortionLookupTable",
"Sip",
"Tabprm",
"Wcsprm",
"Auxprm",
"Celprm",
"Prjprm",
"Wtbarr",
"WCSBase",
"validate",
"WcsError",
"SingularMatrixError",
"InconsistentAxisTypesError",
"InvalidTransformError",
"InvalidCoordinateError",
"InvalidPrjParametersError",
"NoSolutionError",
"InvalidSubimageSpecificationError",
"NoConvergence",
"NonseparableSubimageCoordinateSystemError",
"NoWcsKeywordsFoundError",
"InvalidTabularParametersError",
]
__doctest_skip__ = ["WCS.all_world2pix"]
if _wcs is not None:
if Version(_wcs.__version__) < Version("5.8"):
raise ImportError(
f"astropy.wcs is built with wcslib {_wcs.__version__}, but only "
"versions 5.8 and later on the 5.x series are known to work. "
"The version of wcslib that ships with astropy may be used."
)
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build on your platform."
)
_WCSSUB_TIME_SUPPORT = Version(_wcs.__version__) >= Version("7.8")
_WCS_TPD_WARN_LT71 = Version(_wcs.__version__) < Version("7.1")
_WCS_TPD_WARN_LT74 = Version(_wcs.__version__) < Version("7.4")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Auxprm = _wcs.Auxprm
Celprm = _wcs.Celprm
Prjprm = _wcs.Prjprm
Tabprm = _wcs.Tabprm
Wtbarr = _wcs.Wtbarr
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = (
_wcs.NonseparableSubimageCoordinateSystemError
)
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
InvalidPrjParametersError = _wcs.InvalidPrjParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(("WCSSUB_", "WCSHDR_", "WCSHDO_", "WCSCOMPARE_", "PRJ_")):
locals()[key] = val
__all__.append(key)
# Set coordinate extraction callback for WCS -TAB:
def _load_tab_bintable(hdulist, extnam, extver, extlev, kind, ttype, row, ndim):
arr = hdulist[(extnam, extver)].data[ttype][row - 1]
if arr.ndim != ndim:
if kind == "c" and ndim == 2:
arr = arr.reshape((arr.size, 1))
else:
raise ValueError("Bad TDIM")
return np.ascontiguousarray(arr, dtype=np.double)
_wcs.set_wtbarr_fitsio_callback(_load_tab_bintable)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
Wtbarr = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
_WCSSUB_TIME_SUPPORT = False
_WCS_TPD_WARN_LT71 = False
_WCS_TPD_WARN_LT74 = False
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining SIP keywords. It matches keywords that start with
# A or B, optionally followed by P, followed by an underscore, then a number in
# the range 0-19, followed by an underscore and another number in the range
# 0-19. The keyword optionally ends with a capital letter.
SIP_KW = re.compile("""^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$""")
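# For example, ``SIP_KW`` matches keywords such as "A_0_2", "B_1_10" or
# "AP_2_0", but not "A_ORDER" or "BP_ORDER".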
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == "image":
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == "binary":
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == "pixel":
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' and/or 'pixel'"
)
else:
keysel_flags = -1
return keysel_flags
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
the :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
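Examples
--------
A hedged sketch of typical handling; ``wcs`` and ``world`` are assumed to
already exist and are not defined here::

    try:
        pix = wcs.all_world2pix(world, 1, maxiter=30, tolerance=1e-6)
    except NoConvergence as e:
        pix = e.best_solution        # best approximation reached so far
        divergent = e.divergent      # indices of diverging points, or None
        slow = e.slow_conv           # indices of slowly converging points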
"""
def __init__(
self,
*args,
best_solution=None,
accuracy=None,
niter=None,
divergent=None,
slow_conv=None,
**kwargs,
):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn(
f"Function received unexpected arguments ({list(kwargs)}) these "
"are ignored but will raise an Exception in the "
"future.",
AstropyDeprecationWarning,
)
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
class WCS(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: https://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : `~astropy.io.fits.Header`, `~astropy.io.fits.hdu.image.PrimaryHDU`, `~astropy.io.fits.hdu.image.ImageHDU`, str, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : `~astropy.io.fits.HDUList`, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of str, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
The keyvalue, as well as the keyword, must be
syntactically valid, otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
""" # noqa: E501
def __init__(
self,
header=None,
fobj=None,
key=" ",
minerr=0.0,
relax=True,
naxis=None,
keysel=None,
colsel=None,
fix=True,
translate_units="",
_do_set=True,
):
close_fds = []
# these parameters are stored to be used when unpickling a WCS object:
self._init_kwargs = {
"keysel": copy.copy(keysel),
"colsel": copy.copy(colsel),
}
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key, relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = os.path.exists(header)
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2"
)
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object"
)
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode("ascii")
if not (fobj is None or isinstance(fobj, fits.HDUList)):
raise AssertionError(
"'fobj' must be either None or an astropy.io.fits.HDUList object."
)
est_naxis = 2
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode("ascii")
tmp_wcsprm = _wcs.Wcsprm(
header=tmp_header_bytes,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
warnings=False,
hdulist=fobj,
)
if naxis is not None:
try:
tmp_wcsprm = tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2
except _wcs.NoWcsKeywordsFoundError:
pass
self.naxis = est_naxis
header = fits.Header.fromstring(header_string)
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(header, fobj, dist="CPDIS", err=minerr)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace("END" + " " * 77, "")
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode("ascii")
try:
wcsprm = _wcs.Wcsprm(
header=header_bytes,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
hdulist=fobj,
)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(
header=None,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
hdulist=fobj,
)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if wcsprm.naxis != 2 and (
det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip
):
raise ValueError(
f"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {wcsprm.naxis} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
"""
)
header_naxis = header.get("NAXIS", None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
f"The WCS transformation has more axes ({wcsprm.naxis:d}) than the "
f"image it is associated with ({header_naxis:d})",
FITSFixedWarning,
)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
if header is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
self.fix(translate_units=translate_units)
else:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(
new_copy,
self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2),
)
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(
new_copy,
deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo), deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo), deepcopy(self.det2im2, memo)),
)
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
# We need to know which axes have been dropped, but there is no easy
# way to do this with the .sub function, so instead we assign UUIDs to
# the CNAME parameters in copy.wcs. We can later access the original
# CNAME properties from self.wcs.
cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)]
copy.wcs.cname = cname_uuid
# Subset the WCS
copy.wcs = copy.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
# Construct a list of dimensions from the original WCS in the order
# in which they appear in the final WCS.
keep = [
cname_uuid.index(cname) if cname in cname_uuid else None
for cname in copy.wcs.cname
]
# Restore the original CNAMEs
copy.wcs.cname = ["" if i is None else self.wcs.cname[i] for i in keep]
# Subset pixel_shape and pixel_bounds
if self.pixel_shape:
copy.pixel_shape = tuple(
None if i is None else self.pixel_shape[i] for i in keep
)
if self.pixel_bounds:
copy.pixel_bounds = [
None if i is None else self.pixel_bounds[i] for i in keep
]
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
"""
# Nothing to be done if no WCS attached
if self.wcs is None:
return
# Nothing to be done if no PV parameters attached
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Nothing to be done if any radial terms are present...
# Loop over list to find any radial terms.
# Certain values of the `j' index are used for storing
# radial terms; refer to Equation (1) in
# <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>.
pv = np.asarray(pv)
# Loop over distinct values of `i' index
for i in set(pv[:, 0]):
# Get all values of `j' index for this value of `i' index
js = set(pv[:, 1][pv[:, 0] == i])
# Find max value of `j' index
max_j = max(js)
for j in (3, 11, 23, 39):
if j < max_j and j in js:
return
self.wcs.set_pv([])
warnings.warn(
"Removed redundant SCAMP distortion parameters "
+ "because SIP parameters are also present",
FITSFixedWarning,
)
def fix(self, translate_units="", naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array, optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
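Examples
--------
An illustrative call, assuming ``w`` is a `WCS` instance built from a header
that used the non-standard unit strings described above::

    w.fix(translate_units="shd")   # translate "S"->"s", "H"->"h", "D"->"d"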
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
if (
key == "datfix"
and "1858-11-17" in val
and not np.count_nonzero(self.wcs.mjdref)
):
continue
warnings.warn(
f"'{key}' made the change '{val}'.",
FITSFixedWarning,
)
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
Used to get ``NAXIS1`` and ``NAXIS2``
header and axes are mutually exclusive, alternative ways
to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
axes : (int, int), optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
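Examples
--------
An illustrative sketch, assuming ``w`` is a two-dimensional celestial
`WCS` instance::

    footprint = w.calc_footprint(axes=(1024, 1024))
    # footprint has shape (4, 2): one (x, y) world coordinate per image corner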
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn(
"Need a valid header in order to calculate footprint\n",
AstropyUserWarning,
)
return None
else:
naxis1 = header.get("NAXIS1", None)
naxis2 = header.get("NAXIS2", None)
if naxis1 is None or naxis2 is None:
raise ValueError("Image size could not be determined.")
if center:
corners = np.array(
[[1, 1], [1, naxis2], [naxis1, naxis2], [naxis1, 1]], dtype=np.float64
)
else:
corners = np.array(
[
[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5],
],
dtype=np.float64,
)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header["AXISCORR"]
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = "D2IMDIS"
d_kw = "D2IM"
err_kw = "D2IMERR"
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == "lookup":
del header[distortion]
assert isinstance(fobj, fits.HDUList), (
"An astropy.io.fits.HDUList"
"is required for Lookup table distortion."
)
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + ".EXTVER"
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f".AXIS.{i:d}"
if i == header[dp_axis_key]:
d_data = fobj["D2IMARR", d_extver].data
else:
d_data = (fobj["D2IMARR", d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj["D2IMARR", d_extver].header
d_crpix = (d_header.get("CRPIX1", 0.0), d_header.get("CRPIX2", 0.0))
d_crval = (d_header.get("CRVAL1", 0.0), d_header.get("CRVAL2", 0.0))
d_cdelt = (d_header.get("CDELT1", 1.0), d_header.get("CDELT2", 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn(
"Polynomial distortion is not implemented.\n",
AstropyUserWarning,
)
for key in set(header):
if key.startswith(dp + "."):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn(
"The use of ``AXISCORR`` for D2IM correction has been"
" deprecated.`~astropy.wcs` will read in files with ``AXISCORR`` but"
" ``to_fits()`` will write out files without it.",
AstropyDeprecationWarning,
)
cpdis = [None, None]
crpix = [0.0, 0.0]
crval = [0.0, 0.0]
cdelt = [1.0, 1.0]
try:
d2im_data = fobj[("D2IMARR", 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[("D2IMARR", 1)].header
naxis = d2im_hdr["NAXIS"]
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get("CRPIX" + str(i), 0.0)
crval[i - 1] = d2im_hdr.get("CRVAL" + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get("CDELT" + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`~astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = "D2IMDIS"
d_kw = "D2IM"
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[f"{dist}{num:d}"] = (
"LOOKUP",
"Detector to image correction type",
)
hdulist[0].header[f"{d_kw}{num:d}.EXTVER"] = (
num,
"Version number of WCSDVARR extension",
)
hdulist[0].header[f"{d_kw}{num:d}.NAXES"] = (
len(det2im.data.shape),
"Number of independent variables in D2IM function",
)
for i in range(det2im.data.ndim):
jth = {1: "1st", 2: "2nd", 3: "3rd"}.get(i + 1, f"{i + 1}th")
hdulist[0].header[f"{d_kw}{num:d}.AXIS.{i + 1:d}"] = (
i + 1,
f"Axis number of the {jth} variable in a D2IM function",
)
image = fits.ImageHDU(det2im.data, name="D2IMARR")
header = image.header
header["CRPIX1"] = (det2im.crpix[0], "Coordinate system reference pixel")
header["CRPIX2"] = (det2im.crpix[1], "Coordinate system reference pixel")
header["CRVAL1"] = (
det2im.crval[0],
"Coordinate system value at reference pixel",
)
header["CRVAL2"] = (
det2im.crval[1],
"Coordinate system value at reference pixel",
)
header["CDELT1"] = (det2im.cdelt[0], "Coordinate increment along axis")
header["CDELT2"] = (det2im.cdelt[1], "Coordinate increment along axis")
image.ver = int(hdulist[0].header[f"{d_kw}{num:d}.EXTVER"])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist="CPDIS", err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == "CPDIS":
d_kw = "DP"
err_kw = "CPERR"
else:
d_kw = "DQ"
err_kw = "CQERR"
tables = {}
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == "lookup":
if not isinstance(fobj, fits.HDUList):
raise ValueError(
"an astropy.io.fits.HDUList is "
"required for Lookup table distortion."
)
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + ".EXTVER"
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f".AXIS.{i:d}"
if i == header[dp_axis_key]:
d_data = fobj["WCSDVARR", d_extver].data
else:
d_data = (fobj["WCSDVARR", d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj["WCSDVARR", d_extver].header
d_crpix = (d_header.get("CRPIX1", 0.0), d_header.get("CRPIX2", 0.0))
d_crval = (d_header.get("CRVAL1", 0.0), d_header.get("CRVAL2", 0.0))
d_cdelt = (d_header.get("CDELT1", 1.0), d_header.get("CDELT2", 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in set(header):
if key.startswith(dp + "."):
del header[key]
else:
warnings.warn(
"Polynomial distortion is not implemented.\n",
AstropyUserWarning,
)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist="CPDIS"):
"""
Write out `distortion paper`_ keywords to the given
`~astropy.io.fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == "CPDIS":
d_kw = "DP"
else:
d_kw = "DQ"
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[f"{dist}{num:d}"] = (
"LOOKUP",
"Prior distortion function type",
)
hdulist[0].header[f"{d_kw}{num:d}.EXTVER"] = (
num,
"Version number of WCSDVARR extension",
)
hdulist[0].header[f"{d_kw}{num:d}.NAXES"] = (
len(cpdis.data.shape),
f"Number of independent variables in {dist} function",
)
for i in range(cpdis.data.ndim):
jth = {1: "1st", 2: "2nd", 3: "3rd"}.get(i + 1, f"{i + 1}th")
hdulist[0].header[f"{d_kw}{num:d}.AXIS.{i + 1:d}"] = (
i + 1,
f"Axis number of the {jth} variable in a {dist} function",
)
image = fits.ImageHDU(cpdis.data, name="WCSDVARR")
header = image.header
header["CRPIX1"] = (cpdis.crpix[0], "Coordinate system reference pixel")
header["CRPIX2"] = (cpdis.crpix[1], "Coordinate system reference pixel")
header["CRVAL1"] = (
cpdis.crval[0],
"Coordinate system value at reference pixel",
)
header["CRVAL2"] = (
cpdis.crval[1],
"Coordinate system value at reference pixel",
)
header["CDELT1"] = (cpdis.cdelt[0], "Coordinate increment along axis")
header["CDELT2"] = (cpdis.cdelt[1], "Coordinate increment along axis")
image.ver = int(hdulist[0].header[f"{d_kw}{num:d}.EXTVER"])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _remove_sip_kw(self, header):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in {
m.group() for m in map(SIP_KW.match, list(header)) if m is not None
}:
del header[key]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if "A_ORDER" in header and header["A_ORDER"] > 1:
if "B_ORDER" not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion"
)
m = int(header["A_ORDER"])
a = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"A_{i}_{j}"
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header["B_ORDER"])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"B_{i}_{j}"
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header["A_ORDER"]
del header["B_ORDER"]
ctype = [header[f"CTYPE{nax}{wcskey}"] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith("-SIP") for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
""" # noqa: E501
log.info(message)
elif "B_ORDER" in header and header["B_ORDER"] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER "
+ "keyword for SIP distortion"
)
else:
a = None
b = None
if "AP_ORDER" in header and header["AP_ORDER"] > 1:
if "BP_ORDER" not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion"
)
m = int(header["AP_ORDER"])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"AP_{i}_{j}"
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header["BP_ORDER"])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"BP_{i}_{j}"
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header["AP_ORDER"]
del header["BP_ORDER"]
elif "BP_ORDER" in header and header["BP_ORDER"] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion"
)
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header:
raise ValueError("Header has SIP keywords without CRPIX keywords")
crpix1 = header.get(f"CRPIX1{wcskey}")
crpix2 = header.get(f"CRPIX2{wcskey}")
return Sip(a, b, ap, bp, (crpix1, crpix2))
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
def write_array(name, a):
if a is None:
return
size = a.shape[0]
trdir = "sky to detector" if name[-1] == "P" else "detector to sky"
comment = (
f'SIP polynomial order, axis {ord(name[0]) - ord("A"):d}, {trdir:s}'
)
keywords[f"{name}_ORDER"] = size - 1, comment
comment = "SIP distortion coefficient"
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[f"{name}_{i:d}_{j:d}"] = a[i, j], comment
write_array("A", self.sip.a)
write_array("B", self.sip.b)
write_array("AP", self.sip.ap)
write_array("BP", self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != "RA":
raise ValueError(
"WCS does not have longitude type of 'RA', therefore "
"(ra, dec) data can not be used as input"
)
if self.wcs.lattyp != "DEC":
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore "
"(ra, dec) data can not be used as input"
)
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be used as input"
)
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be "
"used as input"
)
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != "RA":
raise ValueError(
"WCS does not have longitude type of 'RA', therefore "
"(ra, dec) data can not be returned"
)
if self.wcs.lattyp != "DEC":
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore "
"(ra, dec) data can not be returned"
)
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned"
)
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned"
)
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
if any([x.size == 0 for x in axes]):
return axes
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other"
)
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == "input":
xy = self._denormalize_sky(xy)
output = func(xy, origin)
if ra_dec_order and sky == "output":
output = self._normalize_sky(output)
return (
output[:, 0].reshape(axes[0].shape),
output[:, 1].reshape(axes[0].shape),
)
return [output[:, i].reshape(axes[0].shape) for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
f"of shape (N, {self.naxis})"
)
if 0 in xy.shape:
return xy
if ra_dec_order and sky == "input":
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == "output":
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
f"(coords[N][{self.naxis}], origin)"
)
if xy.shape == () or len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be "
+ "a 1-D array for each axis, followed by an origin."
)
return _return_list_of_arrays(axes, origin)
raise TypeError(
f"WCS projection has {self.naxis} dimensions, so expected 2 (an Nx{self.naxis} array "
f"and the origin argument) or {self.naxis + 1} arguments (the position in each "
f"dimension, and the origin argument). Instead, {len(args)} arguments were "
"given."
)
def all_pix2world(self, *args, **kwargs):
return self._array_converter(self._all_pix2world, "output", *args, **kwargs)
all_pix2world.__doc__ = f"""
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('sky coordinates, in degrees', 8)}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)["world"], "output", *args, **kwargs
)
wcs_pix2world.__doc__ = f"""
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('world coordinates, in degrees', 8)}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
"""
def _all_world2pix(
self, world, origin, tolerance, maxiter, adaptive, detect_divergence, quiet
):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
# function describing geometrical distortions
# (see equations 2 and 3 in SIP Paper.
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
# a different phase than user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
# (prime) for the pixels coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
# `pix2foc` is the identical transformation
# `x_i=pix2foc(x_i)` and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right-away when non-linear
# distortions are not present by checking that attributes
# `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
# *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
# While the proposed code is relatively long (considering
# the simplicity of the algorithm), this is due to: 1)
# checking if iterative solution is necessary at all; 2)
# checking for divergence; 3) re-implementation of the
# completely vectorized algorithm as an "adaptive" vectorized
# algorithm (for cases when some points diverge for which we
# want to stop iterations). In my tests, the adaptive version
# of the algorithm is about 50% slower than non-adaptive
# version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
# Here, the input parameter `world` can be a `MxN` array
# where `M` is the number of coordinate axes in WCS and `N`
# is the number of points to be converted simultaneously to
# image coordinates.
#
#
# ### IMPORTANT NOTE: ###
#
# If, in the future releases of the `~astropy.wcs`,
# `pix2foc` will not apply all the required distortion
# corrections then in the code below, calls to `pix2foc` will
# have to be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if not self.has_distortion:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix * dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()["invalid"]
old_over = np.geterr()["over"]
np.seterr(invalid="ignore", over="ignore")
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while np.nanmax(dn) >= tol2 and k < maxiter:
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix * dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = dn >= dnprev
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = dn >= tol2
(inddiv,) = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = dn < dnprev
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
(ind,) = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
(ind,) = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while ind.shape[0] > 0 and k < maxiter:
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = dnnew < dnprev[ind]
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
(subind,) = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
(subind,) = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = (~np.all(np.isfinite(pix), axis=1)) & (
np.all(np.isfinite(world), axis=1)
)
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
(inddiv,) = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
(ind,) = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
f"converge to the requested accuracy after {k:d} "
"iterations.",
best_solution=pix,
accuracy=np.abs(dpix),
niter=k,
slow_conv=ind,
divergent=None,
)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
f"After {k:d} iterations, the solution is diverging "
"at least for one input point.",
best_solution=pix,
accuracy=np.abs(dpix),
niter=k,
slow_conv=ind,
divergent=inddiv,
)
return pix
@deprecated_renamed_argument("accuracy", "tolerance", "4.3")
def all_world2pix(
self,
*args,
tolerance=1e-4,
maxiter=20,
adaptive=False,
detect_divergence=True,
quiet=False,
**kwargs,
):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs: self._all_world2pix(
*args,
tolerance=tolerance,
maxiter=maxiter,
adaptive=adaptive,
detect_divergence=detect_divergence,
quiet=quiet,
),
"input",
*args,
**kwargs,
)
all_world2pix.__doc__ = f"""
all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with the complete
distortion model.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
tolerance : float, optional (default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
within this many pixels of the current estimate, more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points *regardless* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
but there are only a few input data points for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
            the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the return results (in addition to a
            performance penalty). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about 5-20%
performance penalty with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
        Using the method of fixed-point iterations, we iterate
        starting from the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
            solution to the required accuracy within the maximum
            number of iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
"""
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)["pixcrd"], "input", *args, **kwargs
)
wcs_world2pix.__doc__ = f"""
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = f"""
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def sip_pix2foc(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = f"""
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def proj_plane_pixel_scales(self):
"""
Calculate pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This method is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
            the non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
scale : list of `~astropy.units.Quantity`
A vector of projection plane increments corresponding to each
pixel side (axis).
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
""" # noqa: E501
from astropy.wcs.utils import proj_plane_pixel_scales # Avoid circular import
values = proj_plane_pixel_scales(self)
units = [u.Unit(x) for x in self.wcs.cunit]
return [
value * unit for (value, unit) in zip(values, units)
] # Can have different units
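    # Hypothetical usage sketch for ``proj_plane_pixel_scales`` (``w`` is
    # assumed to be a celestial WCS): each element is a
    # `~astropy.units.Quantity`, so unit conversion works directly:
    #
    #     scales = w.proj_plane_pixel_scales()
    #     scales_arcsec = [s.to(u.arcsec) for s in scales]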
def proj_plane_pixel_area(self):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
            the non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
area : `~astropy.units.Quantity`
Area (in the projection plane) of the pixel at ``CRPIX`` location.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
        Depending on the application, the square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
""" # noqa: E501
from astropy.wcs.utils import proj_plane_pixel_area # Avoid circular import
value = proj_plane_pixel_area(self)
unit = u.Unit(self.wcs.cunit[0]) * u.Unit(self.wcs.cunit[1]) # 2D only
return value * unit
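    # Hypothetical usage sketch for ``proj_plane_pixel_area`` (``w`` assumed
    # celestial): the square root of the area gives an equivalent square-pixel
    # scale, as noted in the docstring above:
    #
    #     area = w.proj_plane_pixel_area()
    #     equivalent_scale = np.sqrt(area)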
def to_fits(self, relax=False, key=None):
"""
Generate an `~astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
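    # Hypothetical sketch for ``to_fits`` (output file name made up): the
    # returned HDUList can be written like any other FITS file, preserving
    # distortion-lookup extensions that ``to_header`` alone cannot represent:
    #
    #     hdulist = w.to_fits(relax=True)
    #     hdulist.writeto("wcs_only.fits", overwrite=True)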
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14 # Defined by C-ext # noqa: F821
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe # Defined by C-ext
relax = precision | relax
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
# Check if we can handle TPD distortion correctly
if _WCS_TPD_WARN_LT71:
for kw, val in header.items():
if kw[:5] in ("CPDIS", "CQDIS") and val == "TPD":
warnings.warn(
f"WCS contains a TPD distortion model in {kw}. WCSLIB"
f" {_wcs.__version__} is writing this in a format"
" incompatible with current versions - please update to"
" 7.4 or use the bundled WCSLIB.",
AstropyWarning,
)
elif _WCS_TPD_WARN_LT74:
for kw, val in header.items():
if kw[:5] in ("CPDIS", "CQDIS") and val == "TPD":
warnings.warn(
f"WCS contains a TPD distortion model in {kw}, which"
" requires WCSLIB 7.4 or later to store in a FITS header"
f" (having {_wcs.__version__}).",
AstropyWarning,
)
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(
not ctyp.endswith("-SIP") for ctyp in self.wcs.ctype
):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if (
not do_sip
and self.wcs is not None
and any(self.wcs.ctype)
and self.sip is not None
):
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded:"
f" {', '.join(missing_keys)} Use the ``relax`` kwarg to control"
" this.",
AstropyWarning,
)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
if any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
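    # Hypothetical sketch for ``to_header``: update an existing
    # `astropy.io.fits.Header` (here called ``hdr``) in place with the WCS
    # keywords; ``relax=True`` avoids the "non-standard keywords excluded"
    # warning described above:
    #
    #     hdr.update(w.to_header(relax=True))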
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
        Therefore, if the current WCS is already distortion-corrected (e.g., drizzled),
        the SIP distortion components should not apply; in that case, please remove
        the SIP coefficients from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis + 1):
# strip() must be called here to cover the case of alt key= " "
kw = f"CTYPE{i}{self.wcs.alt}".strip()
if kw in header:
if add_sip:
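                    # Note: str.strip("-SIP") strips any of the characters
                    # '-', 'S', 'I', 'P' from both ends of the value (it does
                    # not match the literal suffix); appending "-SIP" then
                    # yields a value with exactly one "-SIP" suffix.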
val = header[kw].strip("-SIP") + "-SIP"
else:
val = header[kw].strip("-SIP")
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(
self, filename="footprint.reg", color="green", width=2, coordsys=None
):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
"""
comments = (
"# Region file format: DS9 version 4.0 \n"
'# global color=green font="helvetica 12 bold '
"select=1 highlite=1 edit=1 move=1 delete=1 "
"include=1 fixed=0 source\n"
)
coordsys = coordsys or self.wcs.radesys
if coordsys not in (
"PHYSICAL",
"IMAGE",
"FK4",
"B1950",
"FK5",
"J2000",
"GALACTIC",
"ECLIPTIC",
"ICRS",
"LINEAR",
"AMPLIFIER",
"DETECTOR",
):
raise ValueError(
f"Coordinate system '{coordsys}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
)
with open(filename, mode="w") as f:
f.write(comments)
f.write(f"{coordsys}\n")
f.write("polygon(")
ftpr = self.calc_footprint()
if ftpr is not None:
ftpr.tofile(f, sep=",")
f.write(f") # color={color}, width={width:d} \n")
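    # Hypothetical sketch for ``footprint_to_file`` (file name made up): write
    # the footprint of ``w`` as a ds9 region file in the ICRS frame:
    #
    #     w.footprint_to_file("my_footprint.reg", color="red", width=1,
    #                         coordsys="ICRS")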
def _get_naxis(self, header=None):
_naxis = []
if header is not None and not isinstance(header, (str, bytes)):
for naxis in itertools.count(1):
try:
_naxis.append(header[f"NAXIS{naxis}"])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
"""
Return a short description. Simply porting the behavior from
the `printwcs()` method.
"""
description = ["WCS Keywords\n", f"Number of WCS axes: {self.naxis!r}"]
sfmt = " : " + "".join(["{" + f"{i}" + "!r} " for i in range(self.naxis)])
keywords = ["CTYPE", "CRVAL", "CRPIX"]
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword + sfmt.format(*value))
if hasattr(self.wcs, "pc"):
for i in range(self.naxis):
s = ""
for j in range(self.naxis):
s += "".join(["PC", str(i + 1), "_", str(j + 1), " "])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = "CDELT" + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, "cd"):
for i in range(self.naxis):
s = ""
for j in range(self.naxis):
s += "".join(["CD", str(i + 1), "_", str(j + 1), " "])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append(f"NAXIS : {' '.join(map(str, self._naxis))}")
return "\n".join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dict
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
"""
if self.wcs is None:
raise AttributeError("This WCS object does not have a wcsprm object.")
coordinate_type_map = {0: None, 1: "stokes", 2: "celestial", 3: "spectral"}
scale_map = {
0: "linear",
1: "quantized",
2: "non-linear celestial",
3: "non-linear spectral",
4: "logarithmic",
5: "tabular",
}
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult["coordinate_type"] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult["scale"] = scale_map[scale]
group = (axis_type // 10) % 10
subresult["group"] = group
number = axis_type % 10
subresult["number"] = number
result.append(subresult)
return result
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
dct = self.__dict__.copy()
dct["_alt_wcskey"] = self.wcs.alt
return (
__WCS_unpickle__,
(
self.__class__,
dct,
buffer.getvalue(),
),
)
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS with naxis to be chopped to naxis-1
dropax : int
The index of the WCS to drop, counting from 0 (i.e., python convention,
not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with one axis fewer
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i + 1 for i in inds])
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to have its axes swapped
ax0 : int
ax1 : int
The indices of the WCS to be swapped, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with the same number of axes,
but two swapped
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i + 1 for i in inds])
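    # Hypothetical sketch for ``dropaxis`` and ``swapaxes``, assuming ``w`` is
    # a 3-dimensional WCS with ctype ['RA---TAN', 'DEC--TAN', 'VELO-LSR']
    # (axis order chosen purely for illustration):
    #
    #     w_celestial = w.dropaxis(2)    # drop the velocity axis -> 2-D WCS
    #     w_swapped = w.swapaxes(0, 1)   # DEC axis first, then RA, then velocity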
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub(
[WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES, WCSSUB_TIME]
) # Defined by C-ext
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
            A positive slice ``step`` (downsampling) is supported; a negative
            ``step`` (reversing an axis) is not.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
        wcs_new : `~astropy.wcs.WCS`
            A new WCS instance, sliced (and possibly resampled) according to ``view``
"""
if hasattr(view, "__len__") and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, "__len__"): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = (
(crpix - iview.start - 1.0) / iview.step
+ 0.5
+ 1.0 / iview.step / 2.0
)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if "indices must be integers" not in str(exc):
raise
                warnings.warn(
                    f"NAXIS{wcs_index} attribute is not updated because at "
                    f"least one index ('{iview}') is not an integer.",
                    AstropyUserWarning,
)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(
self.sip.a, self.sip.b, self.sip.ap, self.sip.bp, sip_crpix
)
return wcs_new
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
        # (i.e., wcs[i] -> wcs.sub([i+1]))
return self.slice(item)
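    # Hypothetical slicing sketch: ``__getitem__`` forwards to ``slice`` using
    # numpy order, so cutting out a sub-image and slicing the WCS with the
    # same indices keeps the pixel/world correspondence (``data`` and ``w``
    # are assumed to describe the same 2-D image):
    #
    #     cutout_data = data[100:200, 50:150]
    #     cutout_wcs = w[100:200, 50:150]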
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
@property
def axis_type_names(self):
"""
World names for each coordinate axis
Returns
-------
list of str
A list of names along each axis.
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split("-")[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included
"""
return self.sub([WCSSUB_CELESTIAL]) # Defined by C-ext # noqa: F821
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.wcs.lng >= 0 and self.wcs.lat >= 0
except InconsistentAxisTypesError:
return False
@property
def spectral(self):
"""
A copy of the current WCS with only the spectral axes included
"""
return self.sub([WCSSUB_SPECTRAL]) # Defined by C-ext # noqa: F821
@property
def is_spectral(self):
return self.has_spectral and self.naxis == 1
@property
def has_spectral(self):
try:
return self.wcs.spec >= 0
except InconsistentAxisTypesError:
return False
@property
def temporal(self):
"""
A copy of the current WCS with only the time axes included
"""
if not _WCSSUB_TIME_SUPPORT:
raise NotImplementedError(
"Support for 'temporal' axis requires WCSLIB version 7.8 or "
f"greater but linked WCSLIB version is {_wcs.__version__}"
)
return self.sub([WCSSUB_TIME]) # Defined by C-ext # noqa: F821
@property
def is_temporal(self):
return self.has_temporal and self.naxis == 1
@property
def has_temporal(self):
return any(t // 1000 == 4 for t in self.wcs.axis_types)
@property
def has_distortion(self):
"""
Returns `True` if any distortion terms are present.
"""
        # Any single distortion component present counts, including either of
        # the detector-to-image corrections.
        return (
            self.sip is not None
            or self.cpdis1 is not None
            or self.cpdis2 is not None
            or self.det2im1 is not None
            or self.det2im2 is not None
        )
@property
def pixel_scale_matrix(self):
try:
cdelt = np.diag(self.wcs.get_cdelt())
pc = self.wcs.get_pc()
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"cdelt will be ignored since cd is present",
RuntimeWarning,
)
cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.diag(self.wcs.cdelt)
try:
pc = self.wcs.pc
except AttributeError:
pc = 1
pccd = np.dot(cdelt, pc)
return pccd
def footprint_contains(self, coord, **kwargs):
"""
Determines if a given SkyCoord is contained in the wcs footprint.
Parameters
----------
coord : `~astropy.coordinates.SkyCoord`
            The coordinate to check for containment within the WCS footprint.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
return coord.contained_by(self, **kwargs)
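    # Hypothetical sketch for ``footprint_contains`` (coordinate values made
    # up): check whether a SkyCoord falls inside the image footprint:
    #
    #     from astropy.coordinates import SkyCoord
    #     c = SkyCoord(5.5, -72.05, unit="deg")
    #     inside = w.footprint_contains(c)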
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
naxis = dct.pop("naxis", None)
if naxis:
hdulist[0].header["naxis"] = naxis
naxes = dct.pop("_naxis", [])
for k, na in enumerate(naxes):
hdulist[0].header[f"naxis{k + 1:d}"] = na
kwargs = dct.pop("_init_kwargs", {})
self.__dict__.update(dct)
wcskey = dct.pop("_alt_wcskey", " ")
WCS.__init__(self, hdulist[0].header, hdulist, key=wcskey, **kwargs)
self.pixel_bounds = dct.get("_pixel_bounds", None)
return self
def find_all_wcs(
header, relax=True, keysel=None, fix=True, translate_units="", _do_set=True
):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or `~astropy.io.fits.Header` object.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
keysel : sequence of str, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS`
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError("header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
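# Hypothetical sketch for ``find_all_wcs`` (``hdr`` is assumed to be an
# `~astropy.io.fits.Header` possibly carrying alternate WCS keys 'A', 'B', ...):
#
#     for w in find_all_wcs(hdr):
#         print(w.wcs.alt, list(w.wcs.ctype))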
def validate(source):
"""
Prints a WCS validation report for the given FITS file.
Parameters
----------
source : str or file-like or `~astropy.io.fits.HDUList`
The FITS file to validate.
Returns
-------
results : list subclass instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [f" WCS key '{self._key or ' '}':"]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = " - "
else:
initial_indent = " "
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=" ",
)
)
else:
result.append(" No issues.")
return "\n".join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = f" ({self._hdu_name})"
else:
hdu_name = ""
result = [f"HDU {self._hdu_index}{hdu_name}:"]
for wcs in self:
result.append(repr(wcs))
return "\n".join(result)
return ""
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return "\n\n".join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject, fix=False, _do_set=False
)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", FITSFixedWarning, append=True)
try:
WCS(
hdu.header,
hdulist,
key=wcs.wcs.alt or " ",
relax=_wcs.WCSHDR_reject,
fix=True,
_do_set=False,
)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
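# Hypothetical sketch for ``validate`` (file name made up): the returned
# list subclass pretty-prints as a per-HDU, per-WCS-key report:
#
#     report = validate("image_with_wcs.fits")
#     print(report)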
|
d01159f4fafbfd6ad4bad65fde7959afadd19353923fc72e8058449305014884
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
import copy
import enum
import operator
import os
import threading
from datetime import date, datetime, timedelta
from time import strftime
from warnings import warn
import erfa
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.extern import _strptime
from astropy.units import UnitConversionError
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo, data_info_factory
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # noqa: F401
from .formats import (
TIME_DELTA_FORMATS,
TIME_FORMATS,
TimeAstropyTime,
TimeDatetime,
TimeJD,
TimeUnique,
)
from .time_helper.function_helpers import CUSTOM_FUNCTIONS, UNSUPPORTED_FUNCTIONS
from .utils import day_frac
__all__ = [
"TimeBase",
"Time",
"TimeDelta",
"TimeInfo",
"TimeInfoBase",
"update_leap_seconds",
"TIME_SCALES",
"STANDARD_TIME_SCALES",
"TIME_DELTA_SCALES",
"ScaleValueError",
"OperandTypeError",
"TimeDeltaMissingUnitWarning",
]
STANDARD_TIME_SCALES = ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc")
LOCAL_SCALES = ("local",)
TIME_TYPES = {
scale: scales for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales
}
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {
("tai", "tcb"): ("tt", "tdb"),
("tai", "tcg"): ("tt",),
("tai", "ut1"): ("utc",),
("tai", "tdb"): ("tt",),
("tcb", "tcg"): ("tdb", "tt"),
("tcb", "tt"): ("tdb",),
("tcb", "ut1"): ("tdb", "tt", "tai", "utc"),
("tcb", "utc"): ("tdb", "tt", "tai"),
("tcg", "tdb"): ("tt",),
("tcg", "ut1"): ("tt", "tai", "utc"),
("tcg", "utc"): ("tt", "tai"),
("tdb", "ut1"): ("tt", "tai", "utc"),
("tdb", "utc"): ("tt", "tai"),
("tt", "ut1"): ("tai", "utc"),
("tt", "utc"): ("tai",),
}
GEOCENTRIC_SCALES = ("tai", "tt", "tcg")
BARYCENTRIC_SCALES = ("tcb", "tdb")
ROTATIONAL_SCALES = ("ut1",)
TIME_DELTA_TYPES = {
scale: scales
for scales in (
GEOCENTRIC_SCALES,
BARYCENTRIC_SCALES,
ROTATIONAL_SCALES,
LOCAL_SCALES,
)
for scale in scales
}
TIME_DELTA_SCALES = (
GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
)
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {
("tt", "tai"): None,
("tai", "tt"): None,
("tcg", "tt"): -erfa.ELG,
("tt", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcg", "tai"): -erfa.ELG,
("tai", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcb", "tdb"): -erfa.ELB,
("tdb", "tcb"): erfa.ELB / (1.0 - erfa.ELB),
}
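# For example (a sketch of the relation documented above, not code that is
# meant to run here): converting an elapsed interval ``dt_tt`` from TT to TCG
# follows second = first + first * scale_offset[(first, second)]:
#
#     dt_tcg = dt_tt + dt_tt * SCALE_OFFSETS[("tt", "tcg")]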
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
"mean": {
"IAU2006": {"function": erfa.gmst06, "scales": ("ut1", "tt")},
"IAU2000": {"function": erfa.gmst00, "scales": ("ut1", "tt")},
"IAU1982": {"function": erfa.gmst82, "scales": ("ut1",), "include_tio": False},
},
"apparent": {
"IAU2006A": {"function": erfa.gst06a, "scales": ("ut1", "tt")},
"IAU2000A": {"function": erfa.gst00a, "scales": ("ut1", "tt")},
"IAU2000B": {"function": erfa.gst00b, "scales": ("ut1",)},
"IAU1994": {"function": erfa.gst94, "scales": ("ut1",), "include_tio": False},
},
}
class _LeapSecondsCheck(enum.Enum):
NOT_STARTED = 0 # No thread has reached the check
RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held)
DONE = 2 # update_leap_seconds has completed
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED
_LEAP_SECONDS_LOCK = threading.RLock()
def _compress_array_dims(arr):
"""Compress array by allowing at most 2 * edgeitems + 1 in each dimension.
Parameters
----------
arr : array-like
Array to compress.
Returns
-------
out : array-like
Compressed array.
"""
idxs = []
edgeitems = np.get_printoptions()["edgeitems"]
# Build up a list of index arrays for each dimension, allowing no more than
# 2 * edgeitems + 1 elements in each dimension.
for dim in range(arr.ndim):
if arr.shape[dim] > 2 * edgeitems:
# The middle [edgeitems] value does not matter as it gets replaced
# by ... in the output.
idxs.append(
np.concatenate(
[np.arange(edgeitems), [edgeitems], np.arange(-edgeitems, 0)]
)
)
else:
idxs.append(np.arange(arr.shape[dim]))
# Use the magic np.ix_ function to effectively treat each index array as a
# slicing operator.
idxs_ix = np.ix_(*idxs)
out = arr[idxs_ix]
return out
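# Small illustrative sketch for ``_compress_array_dims`` (shapes chosen
# arbitrarily): with the default ``edgeitems=3``, a (10, 10) array is reduced
# to (7, 7), keeping three leading and three trailing entries per axis plus
# one placeholder element in the middle:
#
#     arr = np.arange(100).reshape(10, 10)
#     small = _compress_array_dims(arr)   # small.shape == (7, 7)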
class TimeInfoBase(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
This base class is common between TimeInfo and TimeDeltaInfo.
"""
attr_names = MixinInfo.attr_names | {"serialize_method"}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
# by a property, since Time can be serialized different ways.
_represent_as_dict_extra_attrs = (
"format",
"scale",
"precision",
"in_subfmt",
"out_subfmt",
"location",
"_delta_ut1_utc",
"_delta_tdb_tt",
)
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = "value"
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == "formatted_value":
out = ("value",)
elif method == "jd1_jd2":
out = ("jd1", "jd2")
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {
"fits": "jd1_jd2",
"ecsv": "formatted_value",
"hdf5": "jd1_jd2",
"yaml": "jd1_jd2",
"parquet": "jd1_jd2",
None: "jd1_jd2",
}
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
Returns
-------
arrays : list of ndarray
"""
parent = self._parent
jd_approx = parent.jd
jd_remainder = (parent - parent.__class__(jd_approx, format="jd")).jd
return [jd_approx, jd_remainder]
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(
names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats],
)
)
# When Time has mean, std, min, max methods:
# funcs = [lambda x: getattr(x, stat)() for stat_name in MixinInfo._stats])
def _construct_from_dict(self, map):
if "jd1" in map and "jd2" in map:
# Initialize as JD but revert to desired format and out_subfmt (if needed)
format = map.pop("format")
out_subfmt = map.pop("out_subfmt", None)
map["format"] = "jd"
map["val"] = map.pop("jd1")
map["val2"] = map.pop("jd2")
out = self._parent_cls(**map)
out.format = format
if out_subfmt is not None:
out.out_subfmt = out_subfmt
else:
map["val"] = map.pop("value")
out = self._parent_cls(**map)
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError("input columns have inconsistent locations")
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
        jd2000 = 2451544.5  # Arbitrary JD value (2000-01-01 00:00) that will work with ERFA
jd1 = np.full(shape, jd2000, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
tm_attrs = {
attr: getattr(col0, attr) for attr in ("scale", "location", "precision")
}
out = self._parent_cls(jd1, jd2, format="jd", **tm_attrs)
out.format = col0.format
out.out_subfmt = col0.out_subfmt
out.in_subfmt = col0.in_subfmt
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
map = super()._represent_as_dict(attrs=attrs)
# TODO: refactor these special cases into the TimeFormat classes?
# The datetime64 format requires special handling for ECSV (see #12840).
# The `value` has numpy dtype datetime64 but this is not an allowed
# datatype for ECSV. Instead convert to a string representation.
if (
self._serialize_context == "ecsv"
and map["format"] == "datetime64"
and "value" in map
):
map["value"] = map["value"].astype("U")
# The datetime format is serialized as ISO with no loss of precision.
if map["format"] == "datetime" and "value" in map:
map["value"] = np.vectorize(lambda x: x.isoformat())(map["value"])
return map
def _construct_from_dict(self, map):
# See comment above. May need to convert string back to datetime64.
# Note that _serialize_context is not set here so we just look for the
# string value directly.
if (
map["format"] == "datetime64"
and "value" in map
and map["value"].dtype.kind == "U"
):
map["value"] = map["value"].astype("datetime64")
# Convert back to datetime objects for datetime format.
if map["format"] == "datetime" and "value" in map:
from datetime import datetime
map["value"] = np.vectorize(datetime.fromisoformat)(map["value"])
delta_ut1_utc = map.pop("_delta_ut1_utc", None)
delta_tdb_tt = map.pop("_delta_tdb_tt", None)
out = super()._construct_from_dict(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
class TimeDeltaInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_extra_attrs = ("format", "scale")
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new TimeDelta instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
        col : TimeDelta (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd1 = np.zeros(shape, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
out = self._parent_cls(jd1, jd2, format="jd", scale=col0.scale)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeBase(ShapedLikeNDArray):
"""Base time class from which Time and TimeDelta inherit."""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __getnewargs__(self):
return (self._time,)
def _init_from_vals(
self,
val,
val2,
format,
scale,
copy,
precision=None,
in_subfmt=None,
out_subfmt=None,
):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = "*"
if out_subfmt is None:
out_subfmt = "*"
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError(
"Input val and val2 have inconsistent shape; "
"they cannot be broadcast together."
)
if scale is not None:
if not (isinstance(scale, str) and scale.lower() in self.SCALES):
raise ScaleValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(self.SCALES)}"
)
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(
val, val2, format, scale, precision, in_subfmt, out_subfmt
)
self._format = self._time.name
# Hack from #9969 to allow passing the location value that has been
# collected by the TimeAstropyTime format class up to the Time level.
# TODO: find a nicer way.
if hasattr(self._time, "_location"):
self.location = self._time._location
del self._time._location
# If any inputs were masked then mask jd2 accordingly. From the above
# routine ``mask`` must be either Python bool False or a bool ndarray
# with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd1[mask] = 2451544.5 # Set to JD for 2000-01-01
self._time.jd2[mask] = np.nan
def _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None and (
val.dtype.kind in ("S", "U", "O", "M") or val.dtype.names
):
# Input is a string, object, datetime, or a table-like ndarray
# (structured array, recarray). These input types can be
# uniquely identified by the format classes.
formats = [
(name, cls)
for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)
]
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(("astropy_time", TimeAstropyTime))
elif not (isinstance(format, str) and format.lower() in self.FORMATS):
if format is None:
raise ValueError(
"No time format was given, and the input is not unique"
)
else:
raise ValueError(
f"Format {format!r} is not one of the allowed formats "
f"{sorted(self.FORMATS)}"
)
else:
formats = [(format, self.FORMATS[format])]
assert formats
problems = {}
for name, cls in formats:
try:
return cls(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError) as err:
# If ``format`` specified then there is only one possibility, so raise
# immediately and include the upstream exception message to make it
# easier for user to see what is wrong.
if len(formats) == 1:
raise ValueError(
f"Input values did not match the format class {format}:"
+ os.linesep
+ f"{err.__class__.__name__}: {err}"
) from err
else:
problems[name] = err
else:
raise ValueError(
"Input values did not match any of the formats where the format "
f"keyword is optional: {problems}"
) from problems[formats[0][0]]
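# Illustrative sketch (not part of the implementation) of the guessing
# behaviour described above: with ``format=None``, string-like input is tried
# against each TimeUnique format until one parses, while numeric input always
# needs an explicit format. Assumes the standard built-in formats.
#
#     >>> Time('2020-01-01T00:00:00').format    # guessed from the string
#     'isot'
#     >>> Time(59000.0, format='mjd').scale     # numbers need an explicit format
#     'utc'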
@property
def writeable(self):
return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
"""
return self._format
@format.setter
def format(self, format):
"""Set time format"""
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
format_cls = self.FORMATS[format]
# Get the new TimeFormat object to contain time in new format. Possibly
# coerce in/out_subfmt to '*' (default) if existing subfmt values are
# not valid in the new format.
self._time = format_cls(
self._time.jd1,
self._time.jd2,
self._time._scale,
self.precision,
in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt),
out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
self._format = format
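# Usage sketch (illustrative, assuming the standard built-in formats):
# setting ``format`` only changes how ``.value`` renders the stored jd1/jd2
# pair; the underlying time is unchanged.
#
#     >>> t = Time('2010-01-01 00:00:00', scale='utc')
#     >>> t.value
#     '2010-01-01 00:00:00.000'
#     >>> t.format = 'mjd'
#     >>> t.value
#     55197.0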
def to_string(self):
"""Output a string representation of the Time or TimeDelta object.
Similar to ``str(self.value)`` (which uses numpy array formatting) but
array values are evaluated only for the items that actually are output.
For large arrays this can be a substantial performance improvement.
Returns
-------
out : str
String representation of the time values.
"""
npo = np.get_printoptions()
if self.size < npo["threshold"]:
out = str(self.value)
else:
# Compress time object by allowing at most 2 * npo["edgeitems"] + 1
# in each dimension. Then force numpy to use "summary mode" of
# showing only the edge items by setting the size threshold to 0.
# TODO: use np.core.arrayprint._leading_trailing if we have support for
# np.concatenate. See #8610.
tm = _compress_array_dims(self)
with np.printoptions(threshold=0):
out = str(tm.value)
return out
def __repr__(self):
return "<{} object: scale='{}' format='{}' value={}>".format(
self.__class__.__name__, self.scale, self.format, self.to_string()
)
def __str__(self):
return self.to_string()
def __hash__(self):
try:
loc = getattr(self, "location", None)
if loc is not None:
loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m)
return hash((self.jd1, self.jd2, self.scale, loc))
except TypeError:
if self.ndim != 0:
reason = "(must be scalar)"
elif self.masked:
reason = "(value is masked)"
else:
raise
raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}")
@property
def scale(self):
"""Time scale"""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
if scale == "utc" or self.scale == "utc":
# If doing a transform involving UTC then check that the leap
# seconds table is up to date.
_check_leapsec()
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = "_get_delta_{}_{}".format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
jd1, jd2 = day_frac(jd1, jd2)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](
jd1,
jd2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
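# Illustrative note on the chain built above (assuming the standard
# MULTI_HOPS table): converting 'utc' to 'tdb' is not one ERFA call but the
# chain utc -> tai -> tt -> tdb, each hop using the erfa function named after
# the two scales (erfa.utctai, erfa.taitt, erfa.tttdb), with the last hop
# receiving the extra delta argument from _get_delta_tdb_tt.
#
#     >>> t = Time('2020-06-01 00:00:00', scale='utc')
#     >>> t.tdb.scale    # public access goes through __getattr__,
#     'tdb'              # which calls _set_scale on a replica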
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
self._time.in_subfmt = val
del self.cache
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
# Setting the out_subfmt property here does validation of ``val``
self._time.out_subfmt = val
del self.cache
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in (
(self._time, "jd1"),
(self._time, "jd2"),
(self, "_delta_ut1_utc"),
(self, "_delta_tdb_tt"),
(self, "location"),
):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
if self._time.jd1.shape:
if isinstance(value, np.ndarray):
return value
else:
raise TypeError(
f"JD is an array ({self._time.jd1!r}) but value is not ({value!r})"
)
else:
# zero-dimensional array, is it safe to unbox?
if (
isinstance(value, np.ndarray)
and not value.shape
and not np.ma.is_masked(value)
):
if value.dtype.kind == "M":
# existing test doesn't want datetime64 converted
return value[()]
elif value.dtype.fields:
# Unpack but keep field names; .item() doesn't
# Still don't get python types in the fields
return value[()]
else:
return value.item()
else:
return value
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
def to_value(self, format, subfmt="*"):
"""Get time values expressed in specified output format.
This method allows representing the ``Time`` object in the desired
output ``format`` and optional sub-format ``subfmt``. Available
built-in formats include ``jd``, ``mjd``, ``iso``, and so forth. Each
format can have its own sub-formats.
For built-in numerical formats like ``jd`` or ``unix``, ``subfmt`` can
be one of 'float', 'long', 'decimal', 'str', or 'bytes'. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with
the enhancement depending on platform), and 'decimal'
:class:`decimal.Decimal` for full precision. For 'str' and 'bytes', the
number of digits is also chosen such that time values are represented
accurately.
For built-in date-like string formats, one of 'date_hms', 'date_hm', or
'date' (or 'longdate_hms', etc., for 5-digit years in
`~astropy.time.TimeFITS`). For sub-formats including seconds, the
number of digits used for the fractional seconds is as set by
`~astropy.time.Time.precision`.
Parameters
----------
format : str
The format in which one wants the time values. Default: the current
format.
subfmt : str or None, optional
Value or wildcard pattern to select the sub-format in which the
values should be given. The default of '*' picks the first
available for a given format, i.e., 'float' or 'date_hms'.
If `None`, use the instance's ``out_subfmt``.
"""
# TODO: add a precision argument (but ensure it is keyword argument
# only, to make life easier for TimeDelta.to_value()).
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
cache = self.cache["format"]
# Try to keep cache behaviour like it was in astropy < 4.0.
key = format if subfmt is None else (format, subfmt)
if key not in cache:
if format == self.format:
tm = self
else:
tm = self.replicate(format=format)
# Some TimeFormat subclasses may not be able to handle being passed
# an out_subfmt. This includes some core classes like
# TimeBesselianEpochString that do not have any allowed subfmts. But
# those do deal with `self.out_subfmt` internally, so if subfmt is
# the same, we do not pass it on.
kwargs = {}
if subfmt is not None and subfmt != tm.out_subfmt:
kwargs["out_subfmt"] = subfmt
try:
value = tm._time.to_value(parent=tm, **kwargs)
except TypeError as exc:
# Try validating subfmt, e.g. for formats like 'jyear_str' that
# do not implement out_subfmt in to_value() (because there are
# no allowed subformats). If subfmt is not valid this gives the
# same exception as would have occurred if the call to
# `to_value()` had succeeded.
tm._time._select_subfmts(subfmt)
# Subfmt was valid, so fall back to the original exception to see
# if it was lack of support for out_subfmt as a call arg.
if "unexpected keyword argument 'out_subfmt'" in str(exc):
raise ValueError(
f"to_value() method for format {format!r} does not "
"support passing a 'subfmt' argument"
) from None
else:
# Some unforeseen exception so raise.
raise
value = tm._shaped_like_input(value)
cache[key] = value
return cache[key]
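# Usage sketch (illustrative, assuming the standard built-in formats):
# the same instant can be rendered in different formats and sub-formats
# without modifying the instance; results are cached per (format, subfmt).
#
#     >>> t = Time('2010-01-01 00:00:00', scale='utc')
#     >>> t.to_value('mjd')                  # default 'float' sub-format
#     55197.0
#     >>> t.to_value('iso', subfmt='date')   # date-only string sub-format
#     '2010-01-01'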
@property
def value(self):
"""Time value(s) in current format"""
return self.to_value(self.format, None)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object.
The values to be inserted must conform to the rules for in-place setting
of ``Time`` objects (see ``Get and set values`` in the ``Time``
documentation).
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple row insertion before the
index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different
from that of this object, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.time.Time` subclass
New time object with inserted value(s)
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
# For non-Time object, use numpy to help figure out the length. (Note annoying
# case of a string input that has a length which is not the length we want).
if not isinstance(values, self.__class__):
values = np.asarray(values)
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
out._time.jd1[:idx0] = self._time.jd1[:idx0]
out._time.jd2[:idx0] = self._time.jd2[:idx0]
# This uses the Time setting machinery to coerce and validate as necessary.
out[idx0 : idx0 + n_values] = values
out._time.jd1[idx0 + n_values :] = self._time.jd1[idx0:]
out._time.jd2[idx0 + n_values :] = self._time.jd2[idx0:]
return out
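# Usage sketch: ``insert`` builds a new, longer object and routes the new
# values through __setitem__, so they are validated and scale-converted like
# any other in-place assignment.
#
#     >>> t = Time(['2020-01-01', '2020-01-03'], scale='utc')
#     >>> t2 = t.insert(1, Time('2020-01-02'))
#     >>> t2.shape
#     (3,)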
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError(
f"{self.__class__.__name__} object is read-only. Make a "
'copy() or set "writeable" attribute to True.'
)
else:
raise ValueError(
f"scalar {self.__class__.__name__} object is read-only."
)
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ("_delta_tdb_tt", "_delta_ut1_utc"):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def isclose(self, other, atol=None):
"""Returns a boolean or boolean array where two Time objects are
element-wise equal within a time tolerance.
This evaluates the expression below::
abs(self - other) <= atol
Parameters
----------
other : `~astropy.time.Time`
Time object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is two bits in the 128-bit JD time representation,
equivalent to about 40 picosecs.
"""
if atol is None:
# Note: use 2 bits instead of 1 bit based on experience in precision
# tests, since taking the difference with a UTC time means one has
# to do a scale change.
atol = 2 * np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
try:
# Separate these out so user sees where the problem is
dt = self - other
dt = abs(dt)
out = dt <= atol
except Exception as err:
raise TypeError(
"'other' argument must support subtraction with Time "
"and return a value that supports comparison with "
f"{atol.__class__.__name__}: {err}"
)
return out
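# Usage sketch: the tolerance may be a TimeDelta or any time-like Quantity;
# the comparison happens after converting ``other`` to a compatible scale.
#
#     >>> t1 = Time('2020-01-01 00:00:00', scale='utc')
#     >>> t2 = t1 + 0.4 * u.s
#     >>> bool(t1.isclose(t2, atol=1 * u.s))
#     True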
def copy(self, format=None):
"""
Return a fully independent copy of the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply("copy", format=format)
def replicate(self, format=None, copy=False, cls=None):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
"""
return self._apply("copy" if copy else "replicate", format=format, cls=cls)
def _apply(self, method, *args, format=None, cls=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
broadcast : ``_apply(np.broadcast, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == "replicate":
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(cls or self.__class__)
tm._time = TimeJD(
jd1,
jd2,
self.scale,
precision=0,
in_subfmt="*",
out_subfmt="*",
from_jd=True,
)
# Optional ndarray attributes.
for attr in ("_delta_ut1_utc", "_delta_tdb_tt", "location"):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only an array scalar and the method would return a view,
# since in that case nothing would change).
if getattr(val, "shape", ()):
val = apply_method(val)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined and the
# time object is not a scalar (issue #10688).
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError(f"format must be one of {list(tm.FORMATS)}")
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(
tm._time.jd1,
tm._time.jd2,
tm._time._scale,
precision=self.precision,
in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt),
out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays; they are shared by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
indexed sequentially. When ``keepdims`` is ``True``, the net result is
the same as constructing an index grid with ``np.ogrid`` and then
replacing the ``axis`` item with ``indices``, with its shape expanded
at ``axis``. When ``keepdims`` is ``False``, the result is the same but
with the ``axis`` dimension removed from all list entries.
When ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
index = [
indices
if i == axis
else np.arange(s).reshape(
(1,) * (i if keepdims or i < axis else i - 1)
+ (s,)
+ (1,) * (ndim - i - (1 if keepdims or i > axis else 2))
)
for i, s in enumerate(self.shape)
]
return tuple(index)
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# First get the minimum at normal precision.
jd1, jd2 = self.jd1, self.jd2
approx = np.min(jd1 + jd2, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (jd1 - approx) + jd2
return dt.argmin(axis, out)
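# Worked numeric sketch of the two-double trick used above: adding jd1 and
# jd2 directly loses sub-nanosecond differences, but subtracting the rough
# minimum first cancels the large common part so the small differences
# survive in double precision.
#
#     >>> jd1 = np.array([2459000.5, 2459000.5])
#     >>> jd2 = np.array([1e-12, 0.0])          # ~86 ns apart
#     >>> int(np.argmin(jd1 + jd2))             # difference lost in the sum
#     0
#     >>> approx = np.min(jd1 + jd2)
#     >>> int(np.argmin((jd1 - approx) + jd2))  # difference preserved
#     1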
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = np.max(jd1 + jd2, axis, keepdims=True)
dt = (jd1 - approx) + jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied. Internally,
it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = jd1 + jd2
remainder = (jd1 - approx) + jd2
if axis is None:
return np.lexsort((remainder.ravel(), approx.ravel()))
else:
return np.lexsort(keys=(remainder, approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self.max(axis, keepdims=keepdims) - self.min(axis, keepdims=keepdims)
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis, keepdims=True)]
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
"""Mean along a given axis.
This is similar to :meth:`~numpy.ndarray.mean`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2`` is
used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.mean``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
Similarly, the ``dtype`` argument is also present for compatibility
only; it has no meaning for `Time`.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
dtype : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
out : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for
details.
Returns
-------
m : Time
A new Time instance containing the mean values
"""
if dtype is not None:
raise ValueError("Cannot set ``dtype`` on `Time` instances")
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
where = where & ~self.mask
where_broadcasted = np.broadcast_to(where, self.shape)
kwargs = dict(
axis=axis,
keepdims=keepdims,
where=where,
)
divisor = np.sum(where_broadcasted, axis=axis, keepdims=keepdims)
if np.any(divisor == 0):
raise ValueError(
"Mean over zero elements is not supported as it would give an undefined"
" time;see issue https://github.com/astropy/astropy/issues/6509"
)
jd1, jd2 = day_frac(
val1=np.sum(np.ma.getdata(self.jd1), **kwargs),
val2=np.sum(np.ma.getdata(self.jd2), **kwargs),
divisor=divisor,
)
result = type(self)(
val=jd1,
val2=jd2,
format="jd",
scale=self.scale,
copy=False,
)
result.format = self.format
return result
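# Usage sketch: the averaging is done on the jd1/jd2 pairs via day_frac, so
# full precision is kept; masked elements are excluded through ``where``.
#
#     >>> t = Time(['2021-01-01', '2021-01-03'], scale='utc')
#     >>> t.mean().iso
#     '2021-01-02 00:00:00.000'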
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache["scale"]
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
return self.to_value(attr, subfmt=None)
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError(
"Cannot convert TimeDelta with "
"undefined scale to any defined scale."
)
else:
raise ScaleValueError(
f"Cannot convert {self.__class__.__name__} with scale "
f"'{self.scale}' to scale '{attr}'"
)
else:
# Should raise AttributeError
return self.__getattribute__(attr)
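# Sketch of the dynamic attributes resolved above: a scale name returns a
# rescaled (read-only, cached) instance, while a format name returns the
# value in that format, equivalent to ``to_value(attr, subfmt=None)``.
#
#     >>> t = Time('2020-01-01 00:00:00', scale='utc')
#     >>> t.tt.scale     # scale attribute -> new Time on the TT scale
#     'tt'
#     >>> t.mjd          # format attribute -> value in that format
#     58849.0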
def __dir__(self):
return sorted(set(super().__dir__()) | set(self.SCALES) | set(self.FORMATS))
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError(
"Attribute shape must match or be broadcastable to that of "
"Time object. Typically, give either a single value or "
"one for each time."
)
return val
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError(
f"Cannot compare {self.__class__.__name__} instances with "
f"scales '{self.scale}' and '{other.scale}'"
)
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.0)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
class Time(TimeBase):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
See also: http://docs.astropy.org/en/stable/time/
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Unix glob to select subformats for parsing input times
out_subfmt : str, optional
Unix glob to select subformat for outputting times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
If given as a tuple, it should be able to initialize
an EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
If not given, assumed to be the center of the Earth for time scale
transformations to and from the solar-system barycenter.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, Time):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(
self,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if location is not None:
from astropy.coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
if not hasattr(self, "location"):
self.location = None
if isinstance(val, Time):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(
val, val2, format, scale, copy, precision, in_subfmt, out_subfmt
)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (
self.location.size > 1 and self.location.shape != self.shape
):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape, subok=True)
except Exception as err:
raise ValueError(
f"The location with shape {self.location.shape} cannot be "
f"broadcast against time with shape {self.shape}. "
"Typically, either give a single location or one for each time."
) from err
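# Construction sketch (illustrative): strings plus a scale, optionally with
# a location used for topocentric corrections; the location is broadcast
# against the time array.
#
#     >>> from astropy.coordinates import EarthLocation
#     >>> loc = EarthLocation.from_geodetic(lon=0 * u.deg, lat=51.48 * u.deg)
#     >>> t = Time(['2021-03-20 12:00', '2021-09-22 12:00'],
#     ...          scale='utc', location=loc)
#     >>> t.shape, t.location.shape
#     ((2,), ())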
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object"""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif (self_location is None and value.location is not None) or (
self_location is not None and value.location is None
):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError(
"cannot set to Time with different location: expected "
f"location={self_location} and got location={value.location}"
)
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(
value,
scale=self.scale,
format=self.format,
location=self_location,
)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible Time object: {err}"
)
return value
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
function, so its accuracy and precision is determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
nowtime : :class:`~astropy.time.Time`
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format="datetime", scale="utc")
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : str, sequence, or ndarray
Objects containing time data of type string
format_string : str
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ("U", "S"):
raise TypeError(
"Expected type is string, a bytes-like object or a sequence "
f"of these. Got dtype '{time_array.dtype.kind}'"
)
to_string = (
str
if time_array.dtype.kind == "U"
else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer([time_array, None], op_dtypes=[time_array.dtype, "U30"])
for time, formatted in iterator:
tt, fraction = _strptime._strptime(to_string(time), format_string)
time_tuple = tt[:6] + (fraction,)
formatted[...] = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}".format(
*time_tuple
)
format = kwargs.pop("format", None)
out = cls(*iterator.operands[1:], format="isot", **kwargs)
if format is not None:
out.format = format
return out
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : str
Format definition of return string.
Returns
-------
formatted : str or numpy.array
String or numpy.array of strings formatted according to the given
format string.
"""
formatted_strings = []
for sk in self.replicate("iso")._time.str_kwargs():
date_tuple = date(sk["year"], sk["mon"], sk["day"]).timetuple()
datetime_tuple = (
sk["year"],
sk["mon"],
sk["day"],
sk["hour"],
sk["min"],
sk["sec"],
date_tuple[6],
date_tuple[7],
-1,
)
fmtd_str = format_spec
if "%f" in fmtd_str:
fmtd_str = fmtd_str.replace(
"%f",
"{frac:0{precision}}".format(
frac=sk["fracsec"], precision=self.precision
),
)
fmtd_str = strftime(fmtd_str, datetime_tuple)
formatted_strings.append(fmtd_str)
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
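# Usage sketch (the output of '%b' is locale dependent):
#
#     >>> t = Time('2020-02-29 12:34:56', scale='utc')
#     >>> t.strftime('%d %b %Y, %H:%M')
#     '29 Feb 2020, 12:34'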
def light_travel_time(
self, skycoord, kind="barycentric", location=None, ephemeris=None
):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
The time offset between the barycentre or heliocentre and Earth,
in TDB seconds. Should be added to the original time to get the
time in the solar system barycentre or the heliocentre. The
conversion to BJD will then also include the relativistic correction.
"""
if kind.lower() not in ("barycentric", "heliocentric"):
raise ValueError(
"'kind' parameter must be one of 'heliocentric' or 'barycentric'"
)
if location is None:
if self.location is None:
raise ValueError(
"An EarthLocation needs to be set or passed in to calculate bary- "
"or heliocentric corrections"
)
location = self.location
from astropy.coordinates import (
GCRS,
HCRS,
ICRS,
CartesianRepresentation,
UnitSphericalRepresentation,
solar_system_ephemeris,
)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError(
"Supplied location does not have a valid `get_itrs` method"
)
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == "heliocentric":
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (
skycoord.icrs.represent_as(UnitSphericalRepresentation)
.represent_as(CartesianRepresentation)
.xyz
)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale="tdb")
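# Usage sketch (schematic; needs a target SkyCoord and an observatory
# location, here an assumed site built with from_geodetic):
#
#     >>> from astropy.coordinates import SkyCoord, EarthLocation
#     >>> target = SkyCoord(ra=125.6 * u.deg, dec=-4.9 * u.deg)
#     >>> site = EarthLocation.from_geodetic(0 * u.deg, 51.48 * u.deg)
#     >>> t = Time('2020-01-01 00:00:00', scale='utc', location=site)
#     >>> ltt = t.light_travel_time(target)   # barycentric TimeDelta (TDB)
#     >>> t_bary = t.tdb + ltt                # barycentric time, e.g. for BJD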
def earth_rotation_angle(self, longitude=None):
"""Calculate local Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'tio', the result will be relative to the Terrestrial
Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`).
Returns
-------
`~astropy.coordinates.Longitude`
Local Earth rotation angle with units of hourangle.
See Also
--------
astropy.time.Time.sidereal_time
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
The result includes the TIO locator (s'), which positions the Terrestrial
Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP)
and is rigorously corrected for polar motion
(except when ``longitude='tio'``).
""" # noqa: E501
if isinstance(longitude, str) and longitude == "tio":
longitude = 0
include_tio = False
else:
include_tio = True
return self._sid_time_or_earth_rot_ang(
longitude=longitude,
function=erfa.era00,
scales=("ut1",),
include_tio=include_tio,
)
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'greenwich' or 'tio', the result will be relative to longitude
0 for models before 2000, and relative to the Terrestrial Intermediate
Origin (TIO) for later ones (i.e., the output of the relevant ERFA
function that calculates greenwich sidereal time).
model : str or None; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time, with units of hourangle.
See Also
--------
astropy.time.Time.earth_rotation_angle
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
For the IAU precession models from 2000 onwards, the result includes the
TIO locator (s'), which positions the Terrestrial Intermediate Origin on
the equator of the Celestial Intermediate Pole (CIP) and is rigorously
corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``).
""" # noqa: E501 (docstring is formatted below)
if kind.lower() not in SIDEREAL_TIME_MODELS:
raise ValueError(
"The kind of sidereal time has to be "
+ " or ".join(sorted(SIDEREAL_TIME_MODELS))
)
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models)[-1]
elif model.upper() not in available_models:
raise ValueError(
f"Model {model} not implemented for {kind} sidereal time; "
f"available models are {sorted(available_models)}"
)
model_kwargs = available_models[model.upper()]
if isinstance(longitude, str) and longitude in ("tio", "greenwich"):
longitude = 0
model_kwargs = model_kwargs.copy()
model_kwargs["include_tio"] = False
return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
"apparent",
sorted(SIDEREAL_TIME_MODELS["apparent"]),
"mean",
sorted(SIDEREAL_TIME_MODELS["mean"]),
)
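# Usage sketch (illustrative):
#
#     >>> t = Time('2020-01-01 00:00:00', scale='utc',
#     ...          location=('120d', '40d'))
#     >>> lst = t.sidereal_time('apparent')             # local apparent ST
#     >>> gmst = t.sidereal_time('mean', 'greenwich')   # Greenwich mean ST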
def _sid_time_or_earth_rot_ang(self, longitude, function, scales, include_tio=True):
"""Calculate a local sidereal time or Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance.
function : callable
The ERFA function to use.
scales : tuple of str
The time scales that the function requires on input.
include_tio : bool, optional
Whether to include the TIO locator corrected for polar motion.
Should be `False` for pre-2000 IAU models. Default: `True`.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time or Earth rotation angle, with units of hourangle.
""" # noqa: E501
from astropy.coordinates import EarthLocation, Longitude
from astropy.coordinates.builtin_frames.utils import get_polar_motion
from astropy.coordinates.matrix_utilities import rotation_matrix
if longitude is None:
if self.location is None:
raise ValueError(
"No longitude is given but the location for "
"the Time object is not set."
)
longitude = self.location.lon
elif isinstance(longitude, EarthLocation):
longitude = longitude.lon
else:
# Sanity check on input; default unit is degree.
longitude = Longitude(longitude, u.degree, copy=False)
theta = self._call_erfa(function, scales)
if include_tio:
# TODO: this duplicates part of coordinates.erfa_astrom.ErfaAstrom.apio;
# maybe possible to factor out to one or the other.
sp = self._call_erfa(erfa.sp00, ("tt",))
xp, yp = get_polar_motion(self)
# Form the rotation matrix, CIRS to apparent [HA,Dec].
r = (
rotation_matrix(longitude, "z")
@ rotation_matrix(-yp, "x", unit=u.radian)
@ rotation_matrix(-xp, "y", unit=u.radian)
@ rotation_matrix(theta + sp, "z", unit=u.radian)
)
# Solve for angle.
angle = np.arctan2(r[..., 0, 1], r[..., 0, 0]) << u.radian
else:
angle = longitude + (theta << u.radian)
return Longitude(angle, u.hourangle)
def _call_erfa(self, function, scales):
# TODO: allow erfa functions to be used on Time with __array_ufunc__.
erfa_parameters = [
getattr(getattr(self, scale)._time, jd_part)
for scale in scales
for jd_part in ("jd1", "jd2_filled")
]
result = function(*erfa_parameters)
if self.masked:
result[self.mask] = np.nan
return result
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : `~astropy.utils.iers.IERS`, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. Default: `~astropy.utils.iers.earth_orientation_table`
(which in turn defaults to the combined version provided by
`~astropy.utils.iers.IERS_Auto`).
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status=True``)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
on the first instance ut1 is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True) # doctest: +REMOTE_DATA
>>> status == TIME_BEFORE_IERS_RANGE # doctest: +REMOTE_DATA
array([ True, False]...)
"""
if iers_table is None:
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
If delta_ut1_utc is not yet set, this will interpolate it from the
IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, "_delta_ut1_utc"):
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
# is accessed directly; ensure we behave as expected for that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = "utc"
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == "ut1":
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, "_delta_tdb_tt"):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ("tt", "tdb"):
raise ValueError(
"Accessing the delta_tdb_tt attribute is only "
"possible for TT or TDB time scales"
)
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
# Assume geocentric.
self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0.0, 0.0, 0.0)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1,
jd2,
ut,
lon.to_value(u.radian),
rxy.to_value(u.km),
z.to_value(u.km),
)
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = not isinstance(other, Time)
if other_is_delta: # T - Tdelta
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot subtract Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError(
"Cannot subtract Time instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
self_time = (
self._time if self.scale in TIME_DELTA_SCALES else self.tai._time
)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(
self_time.jd1, self_time.jd2, format="jd", scale=self_time.scale
)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
# T + Tdelta = T
# T + T = error
if isinstance(other, Time):
raise OperandTypeError(self, other, "+")
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot add Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
# Reverse addition is possible: <something-Tdelta-ish> + T
# but there is no case of <something> - T, so no __rsub__.
def __radd__(self, other):
return self.__add__(other)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
scale = self.scale
if scale == "utc":
self = self.tai
result = super().mean(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
if scale == "utc":
result = result.utc
result.out_subfmt = self.out_subfmt
location = self.location
if self.location is not None:
if self.location.shape:
if axis is None:
axis_normalized = tuple(range(self.ndim))
elif isinstance(axis, int):
axis_normalized = (axis,)
else:
axis_normalized = axis
sl = [slice(None)] * self.location.ndim
for a in axis_normalized:
sl[a] = slice(0, 1)
if np.any(self.location != self.location[tuple(sl)]):
raise ValueError(
"`location` must be constant over the reduction axes."
)
if not keepdims:
for a in axis_normalized:
sl[a] = 0
location = self.location[tuple(sl)]
result.location = location
return result
def __array_function__(self, function, types, args, kwargs):
"""
Wrap numpy functions.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
"""
if function in CUSTOM_FUNCTIONS:
f = CUSTOM_FUNCTIONS[function]
return f(*args, **kwargs)
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
return super().__array_function__(function, types, args, kwargs)
def to_datetime(self, timezone=None):
# TODO: this could likely go through to_value, as long as that
# had an **kwargs part that was just passed on to _time.
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.to_value(timezone))
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDeltaMissingUnitWarning(AstropyDeprecationWarning):
"""Warning for missing unit or format in TimeDelta"""
pass
class TimeDelta(TimeBase):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
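For example, a `TimeDelta` created from a `~astropy.units.Quantity` can be
added to a `~astropy.time.Time` (an illustrative sketch; the printed output
is indicative, not verified here)::
>>> from astropy.time import Time, TimeDelta
>>> import astropy.units as u
>>> dt = TimeDelta(3 * u.hour)
>>> Time('2020-01-01') + dt  # doctest: +SKIP
<Time object: scale='utc' format='iso' value=2020-01-01 03:00:00.000>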
See also:
- https://docs.astropy.org/en/stable/time/
- https://docs.astropy.org/en/stable/time/index.html#time-deltas
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s). For numerical inputs without units,
"jd" is assumed and values are interpreted as days.
A deprecation warning is raised in this case. To avoid the warning,
either specify the format or add units to the input values.
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, TimeDelta):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
format = format or self._get_format(val)
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
@staticmethod
def _get_format(val):
if isinstance(val, timedelta):
return "datetime"
if getattr(val, "unit", None) is None:
warn(
"Numerical value without unit or explicit format passed to"
" TimeDelta, assuming days",
TimeDeltaMissingUnitWarning,
)
return "jd"
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
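For example (an illustrative sketch; output indicative, not verified)::
>>> from astropy.time import TimeDelta
>>> import astropy.units as u
>>> TimeDelta(1.5 * u.day).to_datetime()  # doctest: +SKIP
datetime.timedelta(days=1, seconds=43200)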
"""
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1,
jd2 + offset2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
def _add_sub(self, other, op):
"""Perform common elements of addition / subtraction for two delta times"""
# If not a TimeDelta then see if it can be turned into a TimeDelta.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
raise TypeError(
"Cannot add TimeDelta instances with scales '{}' and '{}'".format(
self.scale, other.scale
)
)
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = op(self._time.jd1, other._time.jd1)
jd2 = op(self._time.jd2, other._time.jd2)
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __add__(self, other):
# If other is a Time then use Time.__add__ to do the calculation.
if isinstance(other, Time):
return other.__add__(self)
return self._add_sub(other, operator.add)
def __sub__(self, other):
# TimeDelta - Time is an error
if isinstance(other, Time):
raise OperandTypeError(self, other, "-")
return self._add_sub(other, operator.sub)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# Check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time):
raise OperandTypeError(self, other, "*")
elif (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just multiply in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) * other
except Exception:
# The various ways we could multiply all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
# Cannot do __mul__(1./other) as that loses precision
if (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just divide in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) / other
except Exception:
# The various ways we could divide all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
# Here, we do not have to worry about returning NotImplemented,
# since other has already had a chance to look at us.
return other / self.to(u.day)
def to(self, unit, equivalencies=[]):
"""
Convert to a quantity in the specified unit.
Parameters
----------
unit : unit-like
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally
or within a context.
Returns
-------
quantity : `~astropy.units.Quantity`
The quantity in the units specified.
See also
--------
to_value : get the numerical value in a given unit.
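Examples
--------
An illustrative sketch (output indicative, not verified)::
>>> from astropy.time import TimeDelta
>>> import astropy.units as u
>>> TimeDelta(1.0, format='jd').to(u.hr)  # doctest: +SKIP
<Quantity 24. h>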
"""
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to(
unit, equivalencies=equivalencies
)
def to_value(self, *args, **kwargs):
"""Get time delta values expressed in specified output format or unit.
This method is flexible and handles both conversion to a specified
``TimeDelta`` format / sub-format AND conversion to a specified unit.
If positional argument(s) are provided then the first one is checked
to see if it is a valid ``TimeDelta`` format, and next it is checked
to see if it is a valid unit or unit string.
To convert to a ``TimeDelta`` format and optional sub-format the options
are::
tm = TimeDelta(1.0 * u.s)
tm.to_value('jd') # equivalent of tm.jd
tm.to_value('jd', 'decimal') # convert to 'jd' as a Decimal object
tm.to_value('jd', subfmt='decimal')
tm.to_value(format='jd', subfmt='decimal')
To convert to a unit with optional equivalencies, the options are::
tm.to_value('hr') # convert to u.hr (hours)
tm.to_value('hr', []) # specify equivalencies as a positional arg
tm.to_value('hr', equivalencies=[])
tm.to_value(unit='hr', equivalencies=[])
The built-in `~astropy.time.TimeDelta` options for ``format`` are:
{'jd', 'sec', 'datetime'}.
For the two numerical formats 'jd' and 'sec', the available ``subfmt``
options are: {'float', 'long', 'decimal', 'str', 'bytes'}. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with the
enhancement depending on platform), and 'decimal' instances of
:class:`decimal.Decimal` for full precision. For the 'str' and 'bytes'
sub-formats, the number of digits is also chosen such that time values
are represented accurately. Default: as set by ``out_subfmt`` (which by
default picks the first available for a given format, i.e., 'float').
Parameters
----------
format : str, optional
The format in which one wants the `~astropy.time.TimeDelta` values.
Default: the current format.
subfmt : str, optional
Possible sub-format in which the values should be given. Default: as
set by ``out_subfmt`` (which by default picks the first available
for a given format, i.e., 'float').
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally or
within a context.
Returns
-------
value : ndarray or scalar
The value in the format or units specified.
See also
--------
to : Convert to a `~astropy.units.Quantity` instance in a given unit.
value : The time value in the current format.
"""
if not (args or kwargs):
raise TypeError("to_value() missing required format or unit argument")
# TODO: maybe allow 'subfmt' also for units, keeping full precision
# (effectively, by doing the reverse of quantity_day_frac)?
# This way, only equivalencies could lead to possible precision loss.
if "format" in kwargs or (
args != () and (args[0] is None or args[0] in self.FORMATS)
):
# Super-class will error with duplicate arguments, etc.
return super().to_value(*args, **kwargs)
# With positional arguments, we try parsing the first one as a unit,
# so that on failure we can give a more informative exception.
if args:
try:
unit = u.Unit(args[0])
except ValueError as exc:
raise ValueError(
"first argument is not one of the known "
f"formats ({list(self.FORMATS)}) and failed to parse as a unit."
) from exc
args = (unit,) + args[1:]
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to_value(
*args, **kwargs
)
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object"""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible TimeDelta object: {err}"
)
return value
def isclose(self, other, atol=None, rtol=0.0):
"""Returns a boolean or boolean array where two TimeDelta objects are
element-wise equal within a time tolerance.
This effectively evaluates the expression below::
abs(self - other) <= atol + rtol * abs(other)
Parameters
----------
other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Quantity or TimeDelta object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is one bit in the 128-bit JD time representation,
equivalent to about 20 picosecs.
rtol : float
Relative tolerance for equality
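Examples
--------
An illustrative sketch (output indicative, not verified)::
>>> from astropy.time import TimeDelta
>>> import astropy.units as u
>>> TimeDelta(1.0 * u.s).isclose(1.0 * u.s)  # doctest: +SKIP
True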
"""
try:
other_day = other.to_value(u.day)
except Exception as err:
raise TypeError(f"'other' argument must support conversion to days: {err}")
if atol is None:
atol = np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
return np.isclose(
self.to_value(u.day), other_day, rtol=rtol, atol=atol.to_value(u.day)
)
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time):
dtype = object
else:
dtype = None
val = np.array(val, copy=copy, subok=True, dtype=dtype)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize:
pass
elif val.dtype.kind in "OSUMaV":
pass
else:
val = np.asanyarray(val, dtype=np.float64)
return val
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
in ``val`` and ``val2``. If neither is masked then the return ``mask``
is ``None``.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
mask : bool ndarray or None
val, val2 : ndarray
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = "" if op is None else f" for {op}"
super().__init__(
"Unsupported operand type(s){}: '{}' and '{}'".format(
op_string, left.__class__.__name__, right.__class__.__name__
)
)
def _check_leapsec():
global _LEAP_SECONDS_CHECK
if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE:
with _LEAP_SECONDS_LOCK:
# There are three ways we can get here:
# 1. First call (NOT_STARTED).
# 2. Re-entrant call (RUNNING). We skip the initialisation
# and don't worry about leap second errors.
# 3. Another thread which raced with the first call
# (RUNNING). The first thread has relinquished the
# lock to us, so initialization is complete.
if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED:
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING
update_leap_seconds()
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE
def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
into warnings.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses the list defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
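Examples
--------
An illustrative call (assuming the function is re-exported as
``astropy.time.update_leap_seconds``; it may access remote data, so it is
not executed here)::
>>> from astropy.time import update_leap_seconds
>>> update_leap_seconds()  # doctest: +SKIP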
"""
try:
from astropy.utils import iers
table = iers.LeapSeconds.auto_open(files)
return erfa.leap_seconds.update(table)
except Exception as exc:
warn(
f"leap-second auto-update failed due to the following exception: {exc!r}",
AstropyWarning,
)
return 0
|
ec6e09c4344d1c6689a295720eab1f7f780c8eff2389963539aacb3defc24018 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines colloquially used Imperial units. They are
available in the `astropy.units.imperial` namespace, but not in the
top-level `astropy.units` namespace, e.g.::
>>> import astropy.units as u
>>> mph = u.imperial.mile / u.hour
>>> mph
Unit("mi / h")
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> import astropy.units as u
>>> u.imperial.enable() # doctest: +SKIP
"""
from . import si
from .core import UnitBase, def_unit
_ns = globals()
###########################################################################
# LENGTH
def_unit(["inch"], 2.54 * si.cm, namespace=_ns, doc="International inch")
def_unit(["ft", "foot"], 12 * inch, namespace=_ns, doc="International foot")
def_unit(["yd", "yard"], 3 * ft, namespace=_ns, doc="International yard")
def_unit(["mi", "mile"], 5280 * ft, namespace=_ns, doc="International mile")
def_unit(["mil", "thou"], 0.001 * inch, namespace=_ns, doc="Thousandth of an inch")
def_unit(["nmi", "nauticalmile", "NM"], 1852 * si.m, namespace=_ns, doc="Nautical mile")
def_unit(["fur", "furlong"], 660 * ft, namespace=_ns, doc="Furlong")
###########################################################################
# AREAS
def_unit(["ac", "acre"], 43560 * ft**2, namespace=_ns, doc="International acre")
###########################################################################
# VOLUMES
def_unit(["gallon"], si.liter / 0.264172052, namespace=_ns, doc="U.S. liquid gallon")
def_unit(["quart"], gallon / 4, namespace=_ns, doc="U.S. liquid quart")
def_unit(["pint"], quart / 2, namespace=_ns, doc="U.S. liquid pint")
def_unit(["cup"], pint / 2, namespace=_ns, doc="U.S. customary cup")
def_unit(
["foz", "fluid_oz", "fluid_ounce"], cup / 8, namespace=_ns, doc="U.S. fluid ounce"
)
def_unit(
["tbsp", "tablespoon"], foz / 2, namespace=_ns, doc="U.S. customary tablespoon"
)
def_unit(["tsp", "teaspoon"], tbsp / 3, namespace=_ns, doc="U.S. customary teaspoon")
###########################################################################
# MASS
def_unit(
["oz", "ounce"],
28.349523125 * si.g,
namespace=_ns,
doc="International avoirdupois ounce: mass",
)
def_unit(
["lb", "lbm", "pound"],
16 * oz,
namespace=_ns,
doc="International avoirdupois pound: mass",
)
def_unit(
["st", "stone"], 14 * lb, namespace=_ns, doc="International avoirdupois stone: mass"
)
def_unit(["ton"], 2000 * lb, namespace=_ns, doc="International avoirdupois ton: mass")
def_unit(["slug"], 32.174049 * lb, namespace=_ns, doc="slug: mass")
###########################################################################
# SPEED
def_unit(
["kn", "kt", "knot", "NMPH"],
nmi / si.h,
namespace=_ns,
doc="nautical unit of speed: 1 nmi per hour",
)
###########################################################################
# FORCE
def_unit("lbf", slug * ft * si.s**-2, namespace=_ns, doc="Pound: force")
def_unit(["kip", "kilopound"], 1000 * lbf, namespace=_ns, doc="Kilopound: force")
##########################################################################
# ENERGY
def_unit(["BTU", "btu"], 1.05505585 * si.kJ, namespace=_ns, doc="British thermal unit")
def_unit(
["cal", "calorie"],
4.184 * si.J,
namespace=_ns,
doc="Thermochemical calorie: pre-SI metric unit of energy",
)
def_unit(
["kcal", "Cal", "Calorie", "kilocal", "kilocalorie"],
1000 * cal,
namespace=_ns,
doc="Calorie: colloquial definition of Calorie",
)
##########################################################################
# PRESSURE
def_unit("psi", lbf * inch**-2, namespace=_ns, doc="Pound per square inch: pressure")
###########################################################################
# POWER
# Imperial units
def_unit(
["hp", "horsepower"],
si.W / 0.00134102209,
namespace=_ns,
doc="Electrical horsepower",
)
###########################################################################
# TEMPERATURE
def_unit(
["deg_F", "Fahrenheit"],
namespace=_ns,
doc="Degrees Fahrenheit",
format={"latex": r"{}^{\circ}F", "unicode": "°F"},
)
def_unit(
["deg_R", "Rankine"],
namespace=_ns,
doc="Rankine scale: absolute scale of thermodynamic temperature",
)
###########################################################################
# CLEANUP
del UnitBase
del def_unit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable Imperial units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`.
This may be used with the ``with`` statement to enable Imperial
units only temporarily.
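An illustrative sketch of temporary enabling (not executed here)::
>>> import astropy.units as u
>>> with u.imperial.enable():  # doctest: +SKIP
...     u.m.find_equivalent_units()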
"""
# Local import to avoid cyclical import and polluting namespace
import inspect
from .core import add_enabled_units
return add_enabled_units(inspect.getmodule(enable))
|
775b8f8807f60cf26888809778d50303c6921a7a3ef1c19dbe72395d52ab8da6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines units used in the CDS format, both the units
defined in `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical Catalogues 2.0
<http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_ format and the `complete
set of supported units <https://vizier.u-strasbg.fr/viz-bin/Unit>`_.
This format is used by VOTable up to version 1.2.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.cds`
module::
>>> from astropy.units import cds
>>> q = 10. * cds.lyr # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import cds
>>> cds.enable() # doctest: +SKIP
"""
_ns = globals()
def _initialize_module():
"""Initialize CDS units module."""
# Local imports to avoid polluting top-level namespace
import numpy as np
from astropy import units as u
from astropy.constants import si as _si
from . import core
# The CDS format also supports power-of-2 prefixes as defined here:
# http://physics.nist.gov/cuu/Units/binary.html
prefixes = core.si_prefixes + core.binary_prefixes
# CDS only uses the short prefixes
prefixes = [(short, short, factor) for (short, long, factor) in prefixes]
# The following units are defined in alphabetical order, directly from
# here: https://vizier.u-strasbg.fr/viz-bin/Unit
mapping = [
(["A"], u.A, "Ampere"),
(["a"], u.a, "year", ["P"]),
(["a0"], _si.a0, "Bohr radius"),
(["al"], u.lyr, "Light year", ["c", "d"]),
(["lyr"], u.lyr, "Light year"),
(["alpha"], _si.alpha, "Fine structure constant"),
((["AA", "Å"], ["Angstrom", "Angstroem"]), u.AA, "Angstrom"),
(["arcmin", "arcm"], u.arcminute, "minute of arc"),
(["arcsec", "arcs"], u.arcsecond, "second of arc"),
(["atm"], _si.atm, "atmosphere"),
(["AU", "au"], u.au, "astronomical unit"),
(["bar"], u.bar, "bar"),
(["barn"], u.barn, "barn"),
(["bit"], u.bit, "bit"),
(["byte"], u.byte, "byte"),
(["C"], u.C, "Coulomb"),
(["c"], _si.c, "speed of light", ["p"]),
(["cal"], 4.1854 * u.J, "calorie"),
(["cd"], u.cd, "candela"),
(["ct"], u.ct, "count"),
(["D"], u.D, "Debye (dipole)"),
(["d"], u.d, "Julian day", ["c"]),
((["deg", "°"], ["degree"]), u.degree, "degree"),
(["dyn"], u.dyn, "dyne"),
(["e"], _si.e, "electron charge", ["m"]),
(["eps0"], _si.eps0, "electric constant"),
(["erg"], u.erg, "erg"),
(["eV"], u.eV, "electron volt"),
(["F"], u.F, "Farad"),
(["G"], _si.G, "Gravitation constant"),
(["g"], u.g, "gram"),
(["gauss"], u.G, "Gauss"),
(["geoMass", "Mgeo"], u.M_earth, "Earth mass"),
(["H"], u.H, "Henry"),
(["h"], u.h, "hour", ["p"]),
(["hr"], u.h, "hour"),
(["\\h"], _si.h, "Planck constant"),
(["Hz"], u.Hz, "Hertz"),
(["inch"], 0.0254 * u.m, "inch"),
(["J"], u.J, "Joule"),
(["JD"], u.d, "Julian day", ["M"]),
(["jovMass", "Mjup"], u.M_jup, "Jupiter mass"),
(["Jy"], u.Jy, "Jansky"),
(["K"], u.K, "Kelvin"),
(["k"], _si.k_B, "Boltzmann"),
(["l"], u.l, "litre", ["a"]),
(["lm"], u.lm, "lumen"),
(["Lsun", "solLum"], u.solLum, "solar luminosity"),
(["lx"], u.lx, "lux"),
(["m"], u.m, "meter"),
(["mag"], u.mag, "magnitude"),
(["me"], _si.m_e, "electron mass"),
(["min"], u.minute, "minute"),
(["MJD"], u.d, "Julian day"),
(["mmHg"], 133.322387415 * u.Pa, "millimeter of mercury"),
(["mol"], u.mol, "mole"),
(["mp"], _si.m_p, "proton mass"),
(["Msun", "solMass"], u.solMass, "solar mass"),
((["mu0", "µ0"], []), _si.mu0, "magnetic constant"),
(["muB"], _si.muB, "Bohr magneton"),
(["N"], u.N, "Newton"),
(["Ohm"], u.Ohm, "Ohm"),
(["Pa"], u.Pa, "Pascal"),
(["pc"], u.pc, "parsec"),
(["ph"], u.ph, "photon"),
(["pi"], u.Unit(np.pi), "π"),
(["pix"], u.pix, "pixel"),
(["ppm"], u.Unit(1e-6), "parts per million"),
(["R"], _si.R, "gas constant"),
(["rad"], u.radian, "radian"),
(["Rgeo"], _si.R_earth, "Earth equatorial radius"),
(["Rjup"], _si.R_jup, "Jupiter equatorial radius"),
(["Rsun", "solRad"], u.solRad, "solar radius"),
(["Ry"], u.Ry, "Rydberg"),
(["S"], u.S, "Siemens"),
(["s", "sec"], u.s, "second"),
(["sr"], u.sr, "steradian"),
(["Sun"], u.Sun, "solar unit"),
(["T"], u.T, "Tesla"),
(["t"], 1e3 * u.kg, "metric tonne", ["c"]),
(["u"], _si.u, "atomic mass", ["da", "a"]),
(["V"], u.V, "Volt"),
(["W"], u.W, "Watt"),
(["Wb"], u.Wb, "Weber"),
(["yr"], u.a, "year"),
]
for entry in mapping:
if len(entry) == 3:
names, unit, doc = entry
excludes = []
else:
names, unit, doc, excludes = entry
core.def_unit(
names,
unit,
prefixes=prefixes,
namespace=_ns,
doc=doc,
exclude_prefixes=excludes,
)
core.def_unit(["µas"], u.microarcsecond, doc="microsecond of arc", namespace=_ns)
core.def_unit(["mas"], u.milliarcsecond, doc="millisecond of arc", namespace=_ns)
core.def_unit(
["---", "-"],
u.dimensionless_unscaled,
doc="dimensionless and unscaled",
namespace=_ns,
)
core.def_unit(["%"], u.percent, doc="percent", namespace=_ns)
# The Vizier "standard" defines this in units of "kg s-3", but
# that may not make a whole lot of sense, so here we just define
# it as its own new disconnected unit.
core.def_unit(["Crab"], prefixes=prefixes, namespace=_ns, doc="Crab (X-ray) flux")
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable CDS units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`. This will disable
all of the "default" `astropy.units` units, since there
are some namespace clashes between the two.
This may be used with the ``with`` statement to enable CDS
units only temporarily.
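An illustrative sketch of temporary enabling (not executed here)::
>>> from astropy.units import cds
>>> with cds.enable():  # doctest: +SKIP
...     q = 10. * cds.lyr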
"""
# Local imports to avoid cyclical import and polluting namespace
import inspect
from .core import set_enabled_units
return set_enabled_units(inspect.getmodule(enable))
|
ab1218fe4130b172023e47f4fad601432891ed221fbb460e7954357ea1a395b7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines magnitude zero points and related photometric quantities.
The corresponding magnitudes are given in the description of each unit
(the actual definitions are in `~astropy.units.function.logarithmic`).
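For example (an illustrative sketch; output not shown or verified)::
>>> import astropy.units as u
>>> (0 * u.ABmag).to(u.Jy)  # doctest: +SKIP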
"""
import numpy as _numpy
from astropy.constants import si as _si
from . import astrophys, cgs, si
from .core import Unit, UnitBase, def_unit
_ns = globals()
def_unit(
["Bol", "L_bol"],
_si.L_bol0,
namespace=_ns,
prefixes=False,
doc=(
"Luminosity corresponding to absolute bolometric magnitude zero "
"(magnitude ``M_bol``)."
),
)
def_unit(
["bol", "f_bol"],
_si.L_bol0 / (4 * _numpy.pi * (10.0 * astrophys.pc) ** 2),
namespace=_ns,
prefixes=False,
doc=(
"Irradiance corresponding to appparent bolometric magnitude zero "
"(magnitude ``m_bol``)."
),
)
def_unit(
["AB", "ABflux"],
10.0 ** (48.6 / -2.5) * cgs.erg * cgs.cm**-2 / si.s / si.Hz,
namespace=_ns,
prefixes=False,
doc="AB magnitude zero flux density (magnitude ``ABmag``).",
)
def_unit(
["ST", "STflux"],
10.0 ** (21.1 / -2.5) * cgs.erg * cgs.cm**-2 / si.s / si.AA,
namespace=_ns,
prefixes=False,
doc="ST magnitude zero flux density (magnitude ``STmag``).",
)
def_unit(
["mgy", "maggy"],
namespace=_ns,
prefixes=[(["n"], ["nano"], 1e-9)],
doc=(
"Maggies - a linear flux unit that is the flux for a mag=0 object."
"To tie this onto a specific calibrated unit system, the "
"zero_point_flux equivalency should be used."
),
)
def zero_point_flux(flux0):
"""
An equivalency for converting linear flux units ("maggys") defined relative
to a standard source into a standardized system.
Parameters
----------
flux0 : `~astropy.units.Quantity`
The flux of a magnitude-0 object in the "maggy" system.
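Examples
--------
An illustrative sketch (assuming ``zero_point_flux`` and the ``mgy`` and
``ABflux`` units are available in the top-level `astropy.units` namespace;
not executed here)::
>>> import astropy.units as u
>>> (1.0 * u.mgy).to(u.ABflux, u.zero_point_flux(3631 * u.Jy))  # doctest: +SKIP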
"""
flux_unit0 = Unit(flux0)
return [(maggy, flux_unit0)]
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del cgs, si, astrophys
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
1cae8b2f58e14a5e62aa5a56c1fc1997fa5cf7ce2b48c5d25aa915dc25e83c4d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines SI prefixed units that are required by the VOUnit standard
but that are rarely used in practice and liable to lead to confusion (such as
``msolMass`` for milli-solar mass). They are in a separate module from
`astropy.units.deprecated` because they need to be enabled by default for
`astropy.units` to parse compliant VOUnit strings. As a result, e.g.,
``Unit('msolMass')`` will just work, but to access the unit directly, use
``astropy.units.required_by_vounit.msolMass`` instead of the more typical idiom
possible for the non-prefixed unit, ``astropy.units.solMass``.
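For example (an illustrative sketch; output indicative, not verified)::
>>> import astropy.units as u
>>> u.Unit('msolMass')  # doctest: +SKIP
Unit("msolMass")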
"""
_ns = globals()
def _initialize_module():
# Local imports to avoid polluting top-level namespace
from . import astrophys
from .core import _add_prefixes
_add_prefixes(astrophys.solMass, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.solRad, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.solLum, namespace=_ns, prefixes=True)
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
__doc__ += _generate_prefixonly_unit_summary(globals())
def _enable():
"""
Enable the VOUnit-required extra units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`, and are recognized in the ``Unit('...')``
idiom.
"""
# Local import to avoid cyclical import and polluting namespace
import inspect
from .core import add_enabled_units
return add_enabled_units(inspect.getmodule(_enable))
# Because these are VOUnit mandated units, they start enabled (which is why the
# function is hidden).
_enable()
|
59347f4a2b8bfe48fe103ff042fc1caffcaed9cf7acf3df2fa7cc37edab9830c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the astrophysics-specific units. They are also
available in the `astropy.units` namespace.
"""
from astropy.constants import si as _si
from . import si
from .core import UnitBase, def_unit, set_enabled_units
# To ensure si units of the constants can be interpreted.
set_enabled_units([si])
import numpy as _numpy
_ns = globals()
###########################################################################
# LENGTH
def_unit(
(["AU", "au"], ["astronomical_unit"]),
_si.au,
namespace=_ns,
prefixes=True,
doc="astronomical unit: approximately the mean Earth--Sun distance.",
)
def_unit(
["pc", "parsec"],
_si.pc,
namespace=_ns,
prefixes=True,
doc="parsec: approximately 3.26 light-years.",
)
def_unit(
["solRad", "R_sun", "Rsun"],
_si.R_sun,
namespace=_ns,
doc="Solar radius",
prefixes=False,
format={"latex": r"R_{\odot}", "unicode": "R\N{SUN}"},
)
def_unit(
["jupiterRad", "R_jup", "Rjup", "R_jupiter", "Rjupiter"],
_si.R_jup,
namespace=_ns,
prefixes=False,
doc="Jupiter radius",
# LaTeX jupiter symbol requires wasysym
format={"latex": r"R_{\rm J}", "unicode": "R\N{JUPITER}"},
)
def_unit(
["earthRad", "R_earth", "Rearth"],
_si.R_earth,
namespace=_ns,
prefixes=False,
doc="Earth radius",
# LaTeX earth symbol requires wasysym
format={"latex": r"R_{\oplus}", "unicode": "R⊕"},
)
def_unit(
["lyr", "lightyear"],
(_si.c * si.yr).to(si.m),
namespace=_ns,
prefixes=True,
doc="Light year",
)
def_unit(
["lsec", "lightsecond"],
(_si.c * si.s).to(si.m),
namespace=_ns,
prefixes=False,
doc="Light second",
)
###########################################################################
# MASS
def_unit(
["solMass", "M_sun", "Msun"],
_si.M_sun,
namespace=_ns,
prefixes=False,
doc="Solar mass",
format={"latex": r"M_{\odot}", "unicode": "M\N{SUN}"},
)
def_unit(
["jupiterMass", "M_jup", "Mjup", "M_jupiter", "Mjupiter"],
_si.M_jup,
namespace=_ns,
prefixes=False,
doc="Jupiter mass",
# LaTeX jupiter symbol requires wasysym
format={"latex": r"M_{\rm J}", "unicode": "M\N{JUPITER}"},
)
def_unit(
["earthMass", "M_earth", "Mearth"],
_si.M_earth,
namespace=_ns,
prefixes=False,
doc="Earth mass",
# LaTeX earth symbol requires wasysym
format={"latex": r"M_{\oplus}", "unicode": "M⊕"},
)
##########################################################################
# ENERGY
# Here, explicitly convert the planck constant to 'eV s' since the constant
# can override that to give a more precise value that takes into account
# covariances between e and h. Eventually, this may also be replaced with
# just `_si.Ryd.to(eV)`.
def_unit(
["Ry", "rydberg"],
(_si.Ryd * _si.c * _si.h.to(si.eV * si.s)).to(si.eV),
namespace=_ns,
prefixes=True,
doc="Rydberg: Energy of a photon whose wavenumber is the Rydberg constant",
format={"latex": r"R_{\infty}", "unicode": "R∞"},
)
###########################################################################
# ILLUMINATION
def_unit(
["solLum", "L_sun", "Lsun"],
_si.L_sun,
namespace=_ns,
prefixes=False,
doc="Solar luminance",
format={"latex": r"L_{\odot}", "unicode": "L\N{SUN}"},
)
###########################################################################
# SPECTRAL DENSITY
def_unit(
(["ph", "photon"], ["photon"]),
format={"ogip": "photon", "vounit": "photon"},
namespace=_ns,
prefixes=True,
)
def_unit(
["Jy", "Jansky", "jansky"],
1e-26 * si.W / si.m**2 / si.Hz,
namespace=_ns,
prefixes=True,
doc="Jansky: spectral flux density",
)
def_unit(
["R", "Rayleigh", "rayleigh"],
(1e10 / (4 * _numpy.pi)) * ph * si.m**-2 * si.s**-1 * si.sr**-1,
namespace=_ns,
prefixes=True,
doc="Rayleigh: photon flux",
)
###########################################################################
# EVENTS
def_unit(
(["ct", "count"], ["count"]),
format={"fits": "count", "ogip": "count", "vounit": "count"},
namespace=_ns,
prefixes=True,
exclude_prefixes=["p"],
)
def_unit(
["adu"],
namespace=_ns,
prefixes=True,
)
def_unit(
["DN", "dn"],
namespace=_ns,
prefixes=False,
)
###########################################################################
# MISCELLANEOUS
# Some of these are very FITS-specific and perhaps considered a mistake.
# Maybe they should be moved into the FITS format class?
# TODO: This is defined by the FITS standard as "relative to the sun".
# Is that mass, volume, what?
def_unit(
["Sun"],
namespace=_ns,
)
def_unit(
["chan"],
namespace=_ns,
prefixes=True,
)
def_unit(
["bin"],
namespace=_ns,
prefixes=True,
)
def_unit(
["beam"],
namespace=_ns,
prefixes=True,
)
def_unit(
["electron"],
doc="Number of electrons",
namespace=_ns,
format={"latex": r"e^{-}", "unicode": "e⁻"},
)
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
# -------------------------------------------------------------------------
def __getattr__(attr):
if attr == "littleh":
import warnings
from astropy.cosmology.units import littleh
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"`littleh` is deprecated from module `astropy.units.astrophys` "
"since astropy 5.0 and may be removed in a future version. "
"Use `astropy.cosmology.units.littleh` instead.",
AstropyDeprecationWarning,
)
return littleh
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
0ec4b20598641c64aa48607c95cca61c536bb4abde7b1864defee7baacb4d9e0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Defines the physical types that correspond to different units."""
import numbers
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
from . import imperial # for bkwd compat #11975 and #11977 # noqa: F401
from . import astrophys, cgs, core, misc, quantity, si
__all__ = ["def_physical_type", "get_physical_type", "PhysicalType"]
_units_and_physical_types = [
(core.dimensionless_unscaled, "dimensionless"),
(si.m, "length"),
(si.m**2, "area"),
(si.m**3, "volume"),
(si.s, "time"),
(si.rad, "angle"),
(si.sr, "solid angle"),
(si.m / si.s, {"speed", "velocity"}),
(si.m / si.s**2, "acceleration"),
(si.Hz, "frequency"),
(si.g, "mass"),
(si.mol, "amount of substance"),
(si.K, "temperature"),
(si.W * si.m**-1 * si.K**-1, "thermal conductivity"),
(si.J * si.K**-1, {"heat capacity", "entropy"}),
(si.J * si.K**-1 * si.kg**-1, {"specific heat capacity", "specific entropy"}),
(si.N, "force"),
(si.J, {"energy", "work", "torque"}),
(si.J * si.m**-2 * si.s**-1, {"energy flux", "irradiance"}),
(si.Pa, {"pressure", "energy density", "stress"}),
(si.W, {"power", "radiant flux"}),
(si.kg * si.m**-3, "mass density"),
(si.m**3 / si.kg, "specific volume"),
(si.mol / si.m**3, "molar concentration"),
(si.m**3 / si.mol, "molar volume"),
(si.kg * si.m / si.s, {"momentum", "impulse"}),
(si.kg * si.m**2 / si.s, {"angular momentum", "action"}),
(si.rad / si.s, {"angular speed", "angular velocity", "angular frequency"}),
(si.rad / si.s**2, "angular acceleration"),
(si.rad / si.m, "plate scale"),
(si.g / (si.m * si.s), "dynamic viscosity"),
(si.m**2 / si.s, {"diffusivity", "kinematic viscosity"}),
(si.m**-1, "wavenumber"),
(si.m**-2, "column density"),
(si.A, "electrical current"),
(si.C, "electrical charge"),
(si.V, "electrical potential"),
(si.Ohm, {"electrical resistance", "electrical impedance", "electrical reactance"}),
(si.Ohm * si.m, "electrical resistivity"),
(si.S, "electrical conductance"),
(si.S / si.m, "electrical conductivity"),
(si.F, "electrical capacitance"),
(si.C * si.m, "electrical dipole moment"),
(si.A / si.m**2, "electrical current density"),
(si.V / si.m, "electrical field strength"),
(
si.C / si.m**2,
{"electrical flux density", "surface charge density", "polarization density"},
),
(si.C / si.m**3, "electrical charge density"),
(si.F / si.m, "permittivity"),
(si.Wb, "magnetic flux"),
(si.T, "magnetic flux density"),
(si.A / si.m, "magnetic field strength"),
(si.m**2 * si.A, "magnetic moment"),
(si.H / si.m, {"electromagnetic field strength", "permeability"}),
(si.H, "inductance"),
(si.cd, "luminous intensity"),
(si.lm, "luminous flux"),
(si.lx, {"luminous emittance", "illuminance"}),
(si.W / si.sr, "radiant intensity"),
(si.cd / si.m**2, "luminance"),
(si.m**-3 * si.s**-1, "volumetric rate"),
(astrophys.Jy, "spectral flux density"),
(si.W * si.m**2 * si.Hz**-1, "surface tension"),
(si.J * si.m**-3 * si.s**-1, {"spectral flux density wav", "power density"}),
(astrophys.photon / si.Hz / si.cm**2 / si.s, "photon flux density"),
(astrophys.photon / si.AA / si.cm**2 / si.s, "photon flux density wav"),
(astrophys.R, "photon flux"),
(misc.bit, "data quantity"),
(misc.bit / si.s, "bandwidth"),
(cgs.Franklin, "electrical charge (ESU)"),
(cgs.statampere, "electrical current (ESU)"),
(cgs.Biot, "electrical current (EMU)"),
(cgs.abcoulomb, "electrical charge (EMU)"),
(si.m * si.s**-3, {"jerk", "jolt"}),
(si.m * si.s**-4, {"snap", "jounce"}),
(si.m * si.s**-5, "crackle"),
(si.m * si.s**-6, {"pop", "pounce"}),
(si.K / si.m, "temperature gradient"),
(si.J / si.kg, "specific energy"),
(si.mol * si.m**-3 * si.s**-1, "reaction rate"),
(si.kg * si.m**2, "moment of inertia"),
(si.mol / si.s, "catalytic activity"),
(si.J * si.K**-1 * si.mol**-1, "molar heat capacity"),
(si.mol / si.kg, "molality"),
(si.m * si.s, "absement"),
(si.m * si.s**2, "absity"),
(si.m**3 / si.s, "volumetric flow rate"),
(si.s**-2, "frequency drift"),
(si.Pa**-1, "compressibility"),
(astrophys.electron * si.m**-3, "electron density"),
(astrophys.electron * si.m**-2 * si.s**-1, "electron flux"),
(si.kg / si.m**2, "surface mass density"),
(si.W / si.m**2 / si.sr, "radiance"),
(si.J / si.mol, "chemical potential"),
(si.kg / si.m, "linear density"),
(si.H**-1, "magnetic reluctance"),
(si.W / si.K, "thermal conductance"),
(si.K / si.W, "thermal resistance"),
(si.K * si.m / si.W, "thermal resistivity"),
(si.N / si.s, "yank"),
(si.S * si.m**2 / si.mol, "molar conductivity"),
(si.m**2 / si.V / si.s, "electrical mobility"),
(si.lumen / si.W, "luminous efficacy"),
(si.m**2 / si.kg, {"opacity", "mass attenuation coefficient"}),
(si.kg * si.m**-2 * si.s**-1, {"mass flux", "momentum density"}),
(si.m**-3, "number density"),
(si.m**-2 * si.s**-1, "particle flux"),
]
_physical_unit_mapping = {}
_unit_physical_mapping = {}
_name_physical_mapping = {}
# mapping from attribute-accessible name (no spaces, etc.) to the actual name.
_attrname_physical_mapping = {}
def _physical_type_from_str(name):
"""
Return the `PhysicalType` instance associated with the name of a
physical type.
"""
if name == "unknown":
raise ValueError("cannot uniquely identify an 'unknown' physical type.")
elif name in _attrname_physical_mapping:
return _attrname_physical_mapping[name] # convert attribute-accessible
elif name in _name_physical_mapping:
return _name_physical_mapping[name]
else:
raise ValueError(f"{name!r} is not a known physical type.")
def _replace_temperatures_with_kelvin(unit):
"""
If a unit contains a temperature unit besides kelvin, then replace
that unit with kelvin.
Temperatures cannot be converted directly between K, °F, °C, and
°Ra, in particular since there would be different conversions for
T and ΔT. However, each of these temperatures each represents the
physical type. Replacing the different temperature units with
kelvin allows the physical type to be treated consistently.
"""
physical_type_id = unit._get_physical_type_id()
physical_type_id_components = []
substitution_was_made = False
for base, power in physical_type_id:
if base in ["deg_F", "deg_C", "deg_R"]:
base = "K"
substitution_was_made = True
physical_type_id_components.append((base, power))
if substitution_was_made:
return core.Unit._from_physical_type_id(tuple(physical_type_id_components))
else:
return unit
def _standardize_physical_type_names(physical_type_input):
"""
Convert a string or `set` of strings into a `set` containing
string representations of physical types.
The strings provided in ``physical_type_input`` can each contain
multiple physical types that are separated by a regular slash.
Underscores are treated as spaces so that variable names could
be identical to physical type names.
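For example (an illustrative sketch; output indicative, not verified)::
>>> sorted(_standardize_physical_type_names("momentum/impulse"))  # doctest: +SKIP
['impulse', 'momentum']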
"""
if isinstance(physical_type_input, str):
physical_type_input = {physical_type_input}
standardized_physical_types = set()
for ptype_input in physical_type_input:
if not isinstance(ptype_input, str):
raise ValueError(f"expecting a string, but got {ptype_input}")
input_set = set(ptype_input.split("/"))
processed_set = {s.strip().replace("_", " ") for s in input_set}
standardized_physical_types |= processed_set
return standardized_physical_types
class PhysicalType:
"""
Represents the physical type(s) that are dimensionally compatible
with a set of units.
Instances of this class should be accessed through either
`get_physical_type` or by using the
`~astropy.units.core.UnitBase.physical_type` attribute of units.
This class is not intended to be instantiated directly in user code.
Parameters
----------
unit : `~astropy.units.Unit`
The unit to be represented by the physical type.
physical_types : `str` or `set` of `str`
A `str` representing the name of the physical type of the unit,
or a `set` containing strings that represent one or more names
of physical types.
Notes
-----
A physical type will be considered equal to an equivalent
`PhysicalType` instance (recommended) or a string that contains a
name of the physical type. The latter method is not recommended
in packages, as the names of some physical types may change in the
future.
To maintain backwards compatibility, two physical type names may be
included in one string if they are separated with a slash (e.g.,
``"momentum/impulse"``). String representations of physical types
may include underscores instead of spaces.
Examples
--------
`PhysicalType` instances may be accessed via the
`~astropy.units.core.UnitBase.physical_type` attribute of units.
>>> import astropy.units as u
>>> u.meter.physical_type
PhysicalType('length')
`PhysicalType` instances may also be accessed by calling
`get_physical_type`. This function will accept a unit, a string
containing the name of a physical type, or the number one.
>>> u.get_physical_type(u.m ** -3)
PhysicalType('number density')
>>> u.get_physical_type("volume")
PhysicalType('volume')
>>> u.get_physical_type(1)
PhysicalType('dimensionless')
Some units are dimensionally compatible with multiple physical types.
A pascal is intended to represent pressure and stress, but the unit
decomposition is equivalent to that of energy density.
>>> pressure = u.get_physical_type("pressure")
>>> pressure
PhysicalType({'energy density', 'pressure', 'stress'})
>>> 'energy density' in pressure
True
Physical types can be tested for equality against other physical
type objects or against strings that may contain the name of a
physical type.
>>> area = (u.m ** 2).physical_type
>>> area == u.barn.physical_type
True
>>> area == "area"
True
Multiplication, division, and exponentiation are enabled so that
physical types may be used for dimensional analysis.
>>> length = u.pc.physical_type
>>> area = (u.cm ** 2).physical_type
>>> length * area
PhysicalType('volume')
>>> area / length
PhysicalType('length')
>>> length ** 3
PhysicalType('volume')
Dimensional analysis may also be performed using a string that contains the name of a
physical type.
>>> "length" * area
PhysicalType('volume')
>>> "area" / length
PhysicalType('length')
Unknown physical types are labelled as ``"unknown"``.
>>> (u.s ** 13).physical_type
PhysicalType('unknown')
Dimensional analysis may be performed for unknown physical types too.
>>> length_to_19th_power = (u.m ** 19).physical_type
>>> length_to_20th_power = (u.m ** 20).physical_type
>>> length_to_20th_power / length_to_19th_power
PhysicalType('length')
"""
def __init__(self, unit, physical_types):
self._unit = _replace_temperatures_with_kelvin(unit)
self._physical_type_id = self._unit._get_physical_type_id()
self._physical_type = _standardize_physical_type_names(physical_types)
self._physical_type_list = sorted(self._physical_type)
def __iter__(self):
yield from self._physical_type_list
def __getattr__(self, attr):
# TODO: remove this whole method when accessing str attributes from
# physical types is no longer supported
# short circuit attribute accessed in __str__ to prevent recursion
if attr == "_physical_type_list":
super().__getattribute__(attr)
self_str_attr = getattr(str(self), attr, None)
if hasattr(str(self), attr):
warning_message = (
f"support for accessing str attributes such as {attr!r} "
"from PhysicalType instances is deprecated since 4.3 "
"and will be removed in a subsequent release."
)
warnings.warn(warning_message, AstropyDeprecationWarning)
return self_str_attr
else:
super().__getattribute__(attr) # to get standard error message
def __eq__(self, other):
"""
Return `True` if ``other`` represents a physical type that is
consistent with the physical type of the `PhysicalType` instance.
"""
if isinstance(other, PhysicalType):
return self._physical_type_id == other._physical_type_id
elif isinstance(other, str):
other = _standardize_physical_type_names(other)
return other.issubset(self._physical_type)
else:
return NotImplemented
def __ne__(self, other):
equality = self.__eq__(other)
return not equality if isinstance(equality, bool) else NotImplemented
def _name_string_as_ordered_set(self):
return "{" + str(self._physical_type_list)[1:-1] + "}"
def __repr__(self):
if len(self._physical_type) == 1:
names = "'" + self._physical_type_list[0] + "'"
else:
names = self._name_string_as_ordered_set()
return f"PhysicalType({names})"
def __str__(self):
return "/".join(self._physical_type_list)
@staticmethod
def _dimensionally_compatible_unit(obj):
"""
Return a unit that corresponds to the provided argument.
If a unit is passed in, return that unit. If a physical type
(or a `str` with the name of a physical type) is passed in,
return a unit that corresponds to that physical type. If the
number equal to ``1`` is passed in, return a dimensionless unit.
Otherwise, return `NotImplemented`.
"""
if isinstance(obj, core.UnitBase):
return _replace_temperatures_with_kelvin(obj)
elif isinstance(obj, PhysicalType):
return obj._unit
elif isinstance(obj, numbers.Real) and obj == 1:
return core.dimensionless_unscaled
elif isinstance(obj, str):
return _physical_type_from_str(obj)._unit
else:
return NotImplemented
def _dimensional_analysis(self, other, operation):
other_unit = self._dimensionally_compatible_unit(other)
if other_unit is NotImplemented:
return NotImplemented
other_unit = _replace_temperatures_with_kelvin(other_unit)
new_unit = getattr(self._unit, operation)(other_unit)
return new_unit.physical_type
def __mul__(self, other):
return self._dimensional_analysis(other, "__mul__")
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._dimensional_analysis(other, "__truediv__")
def __rtruediv__(self, other):
other = self._dimensionally_compatible_unit(other)
if other is NotImplemented:
return NotImplemented
return other.physical_type._dimensional_analysis(self, "__truediv__")
def __pow__(self, power):
return (self._unit**power).physical_type
def __hash__(self):
return hash(self._physical_type_id)
def __len__(self):
return len(self._physical_type)
# We need to prevent operations like where a Unit instance left
# multiplies a PhysicalType instance from returning a `Quantity`
# instance with a PhysicalType as the value. We can do this by
# preventing np.array from casting a PhysicalType instance as
# an object array.
__array__ = None
def def_physical_type(unit, name):
"""
Add a mapping between a unit and the corresponding physical type(s).
If a physical type already exists for a unit, add new physical type
names so long as those names are not already in use for other
physical types.
Parameters
----------
unit : `~astropy.units.Unit`
The unit to be represented by the physical type.
name : `str` or `set` of `str`
A `str` representing the name of the physical type of the unit,
or a `set` containing strings that represent one or more names
of physical types.
Raises
------
ValueError
If a physical type name is already in use for another unit, or
if attempting to name a unit as ``"unknown"``.
"""
physical_type_id = unit._get_physical_type_id()
physical_type_names = _standardize_physical_type_names(name)
if "unknown" in physical_type_names:
raise ValueError("cannot uniquely define an unknown physical type")
names_for_other_units = set(_unit_physical_mapping.keys()).difference(
_physical_unit_mapping.get(physical_type_id, {})
)
names_already_in_use = physical_type_names & names_for_other_units
if names_already_in_use:
raise ValueError(
"the following physical type names are already in use: "
f"{names_already_in_use}."
)
unit_already_in_use = physical_type_id in _physical_unit_mapping
if unit_already_in_use:
physical_type = _physical_unit_mapping[physical_type_id]
physical_type_names |= set(physical_type)
physical_type.__init__(unit, physical_type_names)
else:
physical_type = PhysicalType(unit, physical_type_names)
_physical_unit_mapping[physical_type_id] = physical_type
for ptype in physical_type:
_unit_physical_mapping[ptype] = physical_type_id
for ptype_name in physical_type_names:
_name_physical_mapping[ptype_name] = physical_type
# attribute-accessible name
attr_name = ptype_name.replace(" ", "_").replace("(", "").replace(")", "")
_attrname_physical_mapping[attr_name] = physical_type
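# A minimal usage sketch for `def_physical_type` (the name "hypervolume" below
# is made up purely for illustration; any unused unit/name pair works the same
# way, and re-using a name already registered for another unit raises
# `ValueError`):
#
#     >>> import astropy.units as u
#     >>> u.def_physical_type(u.m ** 5, "hypervolume")
#     >>> (u.m ** 5).physical_type   # -> PhysicalType('hypervolume')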
def get_physical_type(obj):
"""
Return the physical type that corresponds to a unit (or another
physical type representation).
Parameters
----------
obj : quantity-like or `~astropy.units.PhysicalType`-like
An object that (implicitly or explicitly) has a corresponding
physical type. This object may be a unit, a
`~astropy.units.Quantity`, an object that can be converted to a
`~astropy.units.Quantity` (such as a number or array), a string
that contains a name of a physical type, or a
`~astropy.units.PhysicalType` instance.
Returns
-------
`~astropy.units.PhysicalType`
A representation of the physical type(s) of the unit.
Examples
--------
The physical type may be retrieved from a unit or a
`~astropy.units.Quantity`.
>>> import astropy.units as u
>>> u.get_physical_type(u.meter ** -2)
PhysicalType('column density')
>>> u.get_physical_type(0.62 * u.barn * u.Mpc)
PhysicalType('volume')
The physical type may also be retrieved by providing a `str` that
contains the name of a physical type.
>>> u.get_physical_type("energy")
PhysicalType({'energy', 'torque', 'work'})
Numbers and arrays of numbers correspond to a dimensionless physical
type.
>>> u.get_physical_type(1)
PhysicalType('dimensionless')
"""
if isinstance(obj, PhysicalType):
return obj
if isinstance(obj, str):
return _physical_type_from_str(obj)
if isinstance(obj, core.UnitBase):
unit = obj
else:
try:
unit = quantity.Quantity(obj, copy=False).unit
except TypeError as exc:
raise TypeError(f"{obj} does not correspond to a physical type.") from exc
unit = _replace_temperatures_with_kelvin(unit)
physical_type_id = unit._get_physical_type_id()
unit_has_known_physical_type = physical_type_id in _physical_unit_mapping
if unit_has_known_physical_type:
return _physical_unit_mapping[physical_type_id]
else:
return PhysicalType(unit, "unknown")
# ------------------------------------------------------------------------------
# Script section creating the physical types and the documentation
# define the physical types
for unit, physical_type in _units_and_physical_types:
def_physical_type(unit, physical_type)
# For getting the physical types.
def __getattr__(name):
"""Checks for physical types using lazy import.
This also allows user-defined physical types to be accessible from the
:mod:`astropy.units.physical` module.
See `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_
Parameters
----------
name : str
The name of the attribute in this module. If it is already defined,
then this function is not called.
Returns
-------
ptype : `~astropy.units.physical.PhysicalType`
Raises
------
AttributeError
If the ``name`` does not correspond to a physical type
"""
if name in _attrname_physical_mapping:
return _attrname_physical_mapping[name]
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
def __dir__():
"""Return contents directory (__all__ + all physical type names)."""
return list(set(__all__) | set(_attrname_physical_mapping.keys()))
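# Usage sketch: the module-level ``__getattr__`` above makes registered
# physical types accessible as attributes of this module (spaces in names are
# replaced by underscores).
#
#     >>> from astropy.units import physical
#     >>> physical.length   # -> PhysicalType('length')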
# This generates a docstring addition for this module that describes all of the
# standard physical types defined here.
if __doc__ is not None:
doclines = [
".. list-table:: Defined Physical Types",
" :header-rows: 1",
" :widths: 30 10 50",
"",
" * - Physical type",
" - Unit",
" - Other physical type(s) with same unit",
]
for name in sorted(_name_physical_mapping.keys()):
physical_type = _name_physical_mapping[name]
doclines += [
f" * - _`{name}`",
f" - :math:`{physical_type._unit.to_string('latex')[1:-1]}`",
f" - {', '.join([n for n in physical_type if n != name])}",
]
__doc__ += "\n\n" + "\n".join(doclines)
del unit, physical_type
|
a7e381517a21bf8ea663fafe707fdada04a2b1c4435df26b47969a06cc997171 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the CGS units. They are also available in the
top-level `astropy.units` namespace.
"""
from fractions import Fraction
from . import si
from .core import UnitBase, def_unit
_ns = globals()
def_unit(["cm", "centimeter"], si.cm, namespace=_ns, prefixes=False)
g = si.g
s = si.s
C = si.C
rad = si.rad
sr = si.sr
cd = si.cd
K = si.K
deg_C = si.deg_C
mol = si.mol
##########################################################################
# ACCELERATION
def_unit(
["Gal", "gal"],
cm / s**2,
namespace=_ns,
prefixes=True,
doc="Gal: CGS unit of acceleration",
)
##########################################################################
# ENERGY
# Use CGS definition of erg
def_unit(
["erg"],
g * cm**2 / s**2,
namespace=_ns,
prefixes=True,
doc="erg: CGS unit of energy",
)
##########################################################################
# FORCE
def_unit(
["dyn", "dyne"],
g * cm / s**2,
namespace=_ns,
prefixes=True,
doc="dyne: CGS unit of force",
)
##########################################################################
# PRESSURE
def_unit(
["Ba", "Barye", "barye"],
g / (cm * s**2),
namespace=_ns,
prefixes=True,
doc="Barye: CGS unit of pressure",
)
##########################################################################
# DYNAMIC VISCOSITY
def_unit(
["P", "poise"],
g / (cm * s),
namespace=_ns,
prefixes=True,
doc="poise: CGS unit of dynamic viscosity",
)
##########################################################################
# KINEMATIC VISCOSITY
def_unit(
["St", "stokes"],
cm**2 / s,
namespace=_ns,
prefixes=True,
doc="stokes: CGS unit of kinematic viscosity",
)
##########################################################################
# WAVENUMBER
def_unit(
["k", "Kayser", "kayser"],
cm**-1,
namespace=_ns,
prefixes=True,
doc="kayser: CGS unit of wavenumber",
)
###########################################################################
# ELECTRICAL
def_unit(
["D", "Debye", "debye"],
Fraction(1, 3) * 1e-29 * C * si.m,
namespace=_ns,
prefixes=True,
doc="Debye: CGS unit of electric dipole moment",
)
def_unit(
["Fr", "Franklin", "statcoulomb", "statC", "esu"],
g ** Fraction(1, 2) * cm ** Fraction(3, 2) * s**-1,
namespace=_ns,
doc="Franklin: CGS (ESU) unit of charge",
)
def_unit(
["statA", "statampere"],
Fr * s**-1,
namespace=_ns,
doc="statampere: CGS (ESU) unit of current",
)
def_unit(
["Bi", "Biot", "abA", "abampere"],
g ** Fraction(1, 2) * cm ** Fraction(1, 2) * s**-1,
namespace=_ns,
doc="Biot: CGS (EMU) unit of current",
)
def_unit(
["abC", "abcoulomb"],
Bi * s,
namespace=_ns,
doc="abcoulomb: CGS (EMU) of charge",
)
###########################################################################
# MAGNETIC
def_unit(
["G", "Gauss", "gauss"],
1e-4 * si.T,
namespace=_ns,
prefixes=True,
doc="Gauss: CGS unit for magnetic field",
)
def_unit(
["Mx", "Maxwell", "maxwell"],
1e-8 * si.Wb,
namespace=_ns,
doc="Maxwell: CGS unit for magnetic flux",
)
###########################################################################
# BASES
bases = {cm, g, s, rad, cd, K, mol}
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
del Fraction
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
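# A few illustrative conversions between the CGS units defined above and their
# SI counterparts (exact by definition):
#
#     >>> import astropy.units as u
#     >>> u.erg.to(u.J)   # 1 erg = 1e-7 J
#     >>> u.dyn.to(u.N)   # 1 dyne = 1e-5 N
#     >>> u.Ba.to(u.Pa)   # 1 barye = 0.1 Pa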
|
639df830d052207eaea08e4062c471d214918a2df2164f528b518bef148c40d9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines deprecated units.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.deprecated`
module::
>>> from astropy.units import deprecated
>>> q = 10. * deprecated.emu # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import deprecated
>>> deprecated.enable() # doctest: +SKIP
"""
_ns = globals()
def _initialize_module():
# Local imports to avoid polluting top-level namespace
from . import astrophys, cgs
from .core import _add_prefixes, def_unit
def_unit(["emu"], cgs.Bi, namespace=_ns, doc="Biot: CGS (EMU) unit of current")
# Add only some *prefixes* as deprecated units.
_add_prefixes(astrophys.jupiterMass, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.earthMass, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.jupiterRad, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.earthRad, namespace=_ns, prefixes=True)
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
__doc__ += _generate_prefixonly_unit_summary(globals())
def enable():
"""
Enable deprecated units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`.
This may be used with the ``with`` statement to enable deprecated
units only temporarily.
"""
import inspect
# Local import to avoid cyclical import
# Local import to avoid polluting namespace
from .core import add_enabled_units
return add_enabled_units(inspect.getmodule(enable))
|
5fec495b1ecc64129e4bebbc683af7406e0d4f138d29eceb7bf3d983ea6eb19b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the SI units. They are also available in the
`astropy.units` namespace.
"""
import numpy as _numpy
from astropy.constants import si as _si
from .core import Unit, UnitBase, def_unit
_ns = globals()
###########################################################################
# DIMENSIONLESS
def_unit(
["percent", "pct"],
Unit(0.01),
namespace=_ns,
prefixes=False,
doc="percent: one hundredth of unity, factor 0.01",
format={"generic": "%", "console": "%", "cds": "%", "latex": r"\%", "unicode": "%"},
)
###########################################################################
# LENGTH
def_unit(
["m", "meter"],
namespace=_ns,
prefixes=True,
doc="meter: base unit of length in SI",
)
def_unit(
["micron"],
um,
namespace=_ns,
doc="micron: alias for micrometer (um)",
format={"latex": r"\mu m", "unicode": "\N{MICRO SIGN}m"},
)
def_unit(
["Angstrom", "AA", "angstrom"],
0.1 * nm,
namespace=_ns,
doc="ångström: 10 ** -10 m",
prefixes=[(["m", "milli"], ["milli", "m"], 1.0e-3)],
format={"latex": r"\mathring{A}", "unicode": "Å", "vounit": "Angstrom"},
)
###########################################################################
# VOLUMES
def_unit(
(["l", "L"], ["liter"]),
1000 * cm**3.0,
namespace=_ns,
prefixes=True,
format={"latex": r"\mathcal{l}", "unicode": "ℓ"},
doc="liter: metric unit of volume",
)
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(
["rad", "radian"],
namespace=_ns,
prefixes=True,
doc=(
"radian: angular measurement of the ratio between the length "
"on an arc and its radius"
),
)
def_unit(
["deg", "degree"],
_numpy.pi / 180.0 * rad,
namespace=_ns,
prefixes=True,
doc="degree: angular measurement 1/360 of full rotation",
format={"latex": r"{}^{\circ}", "unicode": "°"},
)
def_unit(
["hourangle"],
15.0 * deg,
namespace=_ns,
prefixes=False,
doc="hour angle: angular measurement with 24 in a full circle",
format={"latex": r"{}^{h}", "unicode": "ʰ"},
)
def_unit(
["arcmin", "arcminute"],
1.0 / 60.0 * deg,
namespace=_ns,
prefixes=True,
doc="arc minute: angular measurement",
format={"latex": r"{}^{\prime}", "unicode": "′"},
)
def_unit(
["arcsec", "arcsecond"],
1.0 / 3600.0 * deg,
namespace=_ns,
prefixes=True,
doc="arc second: angular measurement",
)
# These special formats should only be used for the non-prefix versions
arcsec._format = {"latex": r"{}^{\prime\prime}", "unicode": "″"}
def_unit(
["mas"],
0.001 * arcsec,
namespace=_ns,
doc="milli arc second: angular measurement",
)
def_unit(
["uas"],
0.000001 * arcsec,
namespace=_ns,
doc="micro arc second: angular measurement",
format={"latex": r"\mu as", "unicode": "μas"},
)
def_unit(
["sr", "steradian"],
rad**2,
namespace=_ns,
prefixes=True,
doc="steradian: unit of solid angle in SI",
)
###########################################################################
# TIME
def_unit(
["s", "second"],
namespace=_ns,
prefixes=True,
exclude_prefixes=["a"],
doc="second: base unit of time in SI.",
)
def_unit(
["min", "minute"],
60 * s,
prefixes=True,
namespace=_ns,
)
def_unit(
["h", "hour", "hr"],
3600 * s,
namespace=_ns,
prefixes=True,
exclude_prefixes=["p"],
)
def_unit(
["d", "day"],
24 * h,
namespace=_ns,
prefixes=True,
exclude_prefixes=["c", "y"],
)
def_unit(
["sday"],
86164.09053 * s,
namespace=_ns,
doc="Sidereal day (sday) is the time of one rotation of the Earth.",
)
def_unit(
["wk", "week"],
7 * day,
namespace=_ns,
)
def_unit(
["fortnight"],
2 * wk,
namespace=_ns,
)
def_unit(
["a", "annum"],
365.25 * d,
namespace=_ns,
prefixes=True,
exclude_prefixes=["P"],
)
def_unit(
["yr", "year"],
365.25 * d,
namespace=_ns,
prefixes=True,
)
###########################################################################
# FREQUENCY
def_unit(
["Hz", "Hertz", "hertz"],
1 / s,
namespace=_ns,
prefixes=True,
doc="Frequency",
)
###########################################################################
# MASS
def_unit(
["kg", "kilogram"],
namespace=_ns,
doc="kilogram: base unit of mass in SI.",
)
def_unit(
["g", "gram"],
1.0e-3 * kg,
namespace=_ns,
prefixes=True,
exclude_prefixes=["k", "kilo"],
)
def_unit(
["t", "tonne"],
1000 * kg,
namespace=_ns,
doc="Metric tonne",
)
###########################################################################
# AMOUNT OF SUBSTANCE
def_unit(
["mol", "mole"],
namespace=_ns,
prefixes=True,
doc="mole: amount of a chemical substance in SI.",
)
###########################################################################
# TEMPERATURE
def_unit(
["K", "Kelvin"],
namespace=_ns,
prefixes=True,
doc="Kelvin: temperature with a null point at absolute zero.",
)
def_unit(
["deg_C", "Celsius"],
namespace=_ns,
doc="Degrees Celsius",
format={"latex": r"{}^{\circ}C", "unicode": "°C", "fits": "Celsius"},
)
###########################################################################
# FORCE
def_unit(
["N", "Newton", "newton"],
kg * m * s**-2,
namespace=_ns,
prefixes=True,
doc="Newton: force",
)
##########################################################################
# ENERGY
def_unit(
["J", "Joule", "joule"],
N * m,
namespace=_ns,
prefixes=True,
doc="Joule: energy",
)
def_unit(
["eV", "electronvolt"],
_si.e.value * J,
namespace=_ns,
prefixes=True,
doc="Electron Volt",
)
##########################################################################
# PRESSURE
def_unit(
["Pa", "Pascal", "pascal"],
J * m**-3,
namespace=_ns,
prefixes=True,
doc="Pascal: pressure",
)
###########################################################################
# POWER
def_unit(
["W", "Watt", "watt"],
J / s,
namespace=_ns,
prefixes=True,
doc="Watt: power",
)
###########################################################################
# ELECTRICAL
def_unit(
["A", "ampere", "amp"],
namespace=_ns,
prefixes=True,
doc="ampere: base unit of electric current in SI",
)
def_unit(
["C", "coulomb"],
A * s,
namespace=_ns,
prefixes=True,
doc="coulomb: electric charge",
)
def_unit(
["V", "Volt", "volt"],
J * C**-1,
namespace=_ns,
prefixes=True,
doc="Volt: electric potential or electromotive force",
)
def_unit(
(["Ohm", "ohm"], ["Ohm"]),
V * A**-1,
namespace=_ns,
prefixes=True,
doc="Ohm: electrical resistance",
format={"latex": r"\Omega", "unicode": "Ω"},
)
def_unit(
["S", "Siemens", "siemens"],
A * V**-1,
namespace=_ns,
prefixes=True,
doc="Siemens: electrical conductance",
)
def_unit(
["F", "Farad", "farad"],
C * V**-1,
namespace=_ns,
prefixes=True,
doc="Farad: electrical capacitance",
)
###########################################################################
# MAGNETIC
def_unit(
["Wb", "Weber", "weber"],
V * s,
namespace=_ns,
prefixes=True,
doc="Weber: magnetic flux",
)
def_unit(
["T", "Tesla", "tesla"],
Wb * m**-2,
namespace=_ns,
prefixes=True,
doc="Tesla: magnetic flux density",
)
def_unit(
["H", "Henry", "henry"],
Wb * A**-1,
namespace=_ns,
prefixes=True,
doc="Henry: inductance",
)
###########################################################################
# ILLUMINATION
def_unit(
["cd", "candela"],
namespace=_ns,
prefixes=True,
doc="candela: base unit of luminous intensity in SI",
)
def_unit(
["lm", "lumen"],
cd * sr,
namespace=_ns,
prefixes=True,
doc="lumen: luminous flux",
)
def_unit(
["lx", "lux"],
lm * m**-2,
namespace=_ns,
prefixes=True,
doc="lux: luminous emittance",
)
###########################################################################
# RADIOACTIVITY
def_unit(
["Bq", "becquerel"],
1 / s,
namespace=_ns,
prefixes=False,
doc="becquerel: unit of radioactivity",
)
def_unit(
["Ci", "curie"],
Bq * 3.7e10,
namespace=_ns,
prefixes=False,
doc="curie: unit of radioactivity",
)
###########################################################################
# BASES
bases = {m, s, kg, A, cd, rad, K, mol}
###########################################################################
# CLEANUP
del UnitBase
del Unit
del def_unit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
a165b3951e7a5ea498753b860316e349c0ea7dc6eec12d91a2fc0f61de29a719 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines miscellaneous units. They are also
available in the `astropy.units` namespace.
"""
from astropy.constants import si as _si
from . import si
from .core import UnitBase, binary_prefixes, def_unit, set_enabled_units, si_prefixes
# To ensure si units of the constants can be interpreted.
set_enabled_units([si])
import numpy as _numpy
_ns = globals()
###########################################################################
# AREAS
def_unit(
["barn", "barn"],
10**-28 * si.m**2,
namespace=_ns,
prefixes=True,
doc="barn: unit of area used in HEP",
)
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(
["cycle", "cy"],
2.0 * _numpy.pi * si.rad,
namespace=_ns,
prefixes=False,
doc="cycle: angular measurement, a full turn or rotation",
)
def_unit(
["spat", "sp"],
4.0 * _numpy.pi * si.sr,
namespace=_ns,
prefixes=False,
doc="spat: the solid angle of the sphere, 4pi sr",
)
##########################################################################
# PRESSURE
def_unit(
["bar"],
1e5 * si.Pa,
namespace=_ns,
prefixes=[(["m"], ["milli"], 1.0e-3)],
doc="bar: pressure",
)
# The torr is almost the same as mmHg but not quite.
# See https://en.wikipedia.org/wiki/Torr
# Define the unit here despite it not being an astrophysical unit.
# It may be moved if more similar units are created later.
def_unit(
["Torr", "torr"],
_si.atm.value / 760.0 * si.Pa,
namespace=_ns,
prefixes=[(["m"], ["milli"], 1.0e-3)],
doc=(
"Unit of pressure based on an absolute scale, now defined as "
"exactly 1/760 of a standard atmosphere"
),
)
###########################################################################
# MASS
def_unit(
["M_p"],
_si.m_p,
namespace=_ns,
doc="Proton mass",
format={"latex": r"M_{p}", "unicode": "Mₚ"},
)
def_unit(
["M_e"],
_si.m_e,
namespace=_ns,
doc="Electron mass",
format={"latex": r"M_{e}", "unicode": "Mₑ"},
)
# Unified atomic mass unit
def_unit(
["u", "Da", "Dalton"],
_si.u,
namespace=_ns,
prefixes=True,
exclude_prefixes=["a", "da"],
doc="Unified atomic mass unit",
)
###########################################################################
# COMPUTER
def_unit(
(["bit", "b"], ["bit"]),
namespace=_ns,
prefixes=si_prefixes + binary_prefixes,
)
def_unit(
(["byte", "B"], ["byte"]),
8 * bit,
namespace=_ns,
format={"vounit": "byte"},
prefixes=si_prefixes + binary_prefixes,
exclude_prefixes=["d"],
)
def_unit(
(["pix", "pixel"], ["pixel"]),
format={"ogip": "pixel", "vounit": "pixel"},
namespace=_ns,
prefixes=True,
)
def_unit(
(["vox", "voxel"], ["voxel"]),
format={"fits": "voxel", "ogip": "voxel", "vounit": "voxel"},
namespace=_ns,
prefixes=True,
)
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
9ecafac840928f9150eeefd5dc428b08c68e73e680fcaf64460bbb0877a0517c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""A set of standard astronomical equivalencies."""
import warnings
from collections import UserList
# THIRD-PARTY
import numpy as np
# LOCAL
from astropy.constants import si as _si
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import isiterable
from . import astrophys, cgs, dimensionless_unscaled, misc, si
from .core import Unit, UnitsError
from .function import units as function_units
__all__ = [
"parallax",
"spectral",
"spectral_density",
"doppler_radio",
"doppler_optical",
"doppler_relativistic",
"doppler_redshift",
"mass_energy",
"brightness_temperature",
"thermodynamic_temperature",
"beam_angular_area",
"dimensionless_angles",
"logarithmic",
"temperature",
"temperature_energy",
"molar_mass_amu",
"pixel_scale",
"plate_scale",
"Equivalency",
]
class Equivalency(UserList):
"""
A container for a units equivalency.
Attributes
----------
name: `str`
The name of the equivalency.
kwargs: `dict`
Any positional or keyword arguments used to make the equivalency.
"""
def __init__(self, equiv_list, name="", kwargs=None):
self.data = equiv_list
self.name = [name]
self.kwargs = [kwargs] if kwargs is not None else [dict()]
def __add__(self, other):
if isinstance(other, Equivalency):
new = super().__add__(other)
new.name = self.name[:] + other.name
new.kwargs = self.kwargs[:] + other.kwargs
return new
else:
return self.data.__add__(other)
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self.name == other.name
and self.kwargs == other.kwargs
)
def dimensionless_angles():
"""Allow angles to be equivalent to dimensionless (with 1 rad = 1 m/m = 1).
It is special compared to other equivalency pairs in that it
allows this independent of the power to which the angle is raised,
and independent of whether it is part of a more complicated unit.
"""
return Equivalency([(si.radian, None)], "dimensionless_angles")
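# Usage sketch (illustrative numbers): with this equivalency, an angular factor
# can drop out of a composite unit, e.g. proper motion times distance becomes a
# transverse velocity.
#
#     >>> import astropy.units as u
#     >>> pm, dist = 10 * u.mas / u.yr, 3 * u.kpc
#     >>> (pm * dist).to(u.km / u.s, equivalencies=u.dimensionless_angles())   # ~142 km/s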
def logarithmic():
"""Allow logarithmic units to be converted to dimensionless fractions"""
return Equivalency(
[(dimensionless_unscaled, function_units.dex, np.log10, lambda x: 10.0**x)],
"logarithmic",
)
def parallax():
"""
Returns a list of equivalence pairs that handle the conversion
between parallax angle and distance.
"""
def parallax_converter(x):
x = np.asanyarray(x)
d = 1 / x
if isiterable(d):
d[d < 0] = np.nan
return d
else:
if d < 0:
return np.array(np.nan)
else:
return d
return Equivalency(
[(si.arcsecond, astrophys.parsec, parallax_converter)], "parallax"
)
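# Usage sketch (illustrative value): a parallax angle converts directly to a
# distance, with negative parallaxes mapping to NaN as implemented above.
#
#     >>> import astropy.units as u
#     >>> (10 * u.mas).to(u.pc, equivalencies=u.parallax())   # -> 100 pc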
def spectral():
"""
Returns a list of equivalence pairs that handle spectral
wavelength, wave number, frequency, and energy equivalencies.
Allows conversions between wavelength units, wave number units,
frequency units, and energy units as they relate to light.
There are two types of wave number:
* spectroscopic - :math:`1 / \\lambda` (per meter)
* angular - :math:`2 \\pi / \\lambda` (radian per meter)
"""
c = _si.c.value
h = _si.h.value
hc = h * c
two_pi = 2.0 * np.pi
inv_m_spec = si.m**-1
inv_m_ang = si.radian / si.m
return Equivalency(
[
(si.m, si.Hz, lambda x: c / x),
(si.m, si.J, lambda x: hc / x),
(si.Hz, si.J, lambda x: h * x, lambda x: x / h),
(si.m, inv_m_spec, lambda x: 1.0 / x),
(si.Hz, inv_m_spec, lambda x: x / c, lambda x: c * x),
(si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),
(inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),
(si.m, inv_m_ang, lambda x: two_pi / x),
(si.Hz, inv_m_ang, lambda x: two_pi * x / c, lambda x: c * x / two_pi),
(si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi),
],
"spectral",
)
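# Usage sketch (approximate value): wavelength, frequency, wave number, and
# energy interconvert freely once this equivalency is enabled.
#
#     >>> import astropy.units as u
#     >>> (500 * u.nm).to(u.THz, equivalencies=u.spectral())   # ~599.58 THz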
def spectral_density(wav, factor=None):
"""
Returns a list of equivalence pairs that handle spectral density
with regard to wavelength and frequency.
Parameters
----------
wav : `~astropy.units.Quantity`
`~astropy.units.Quantity` associated with values being converted
(e.g., wavelength or frequency).
Notes
-----
The ``factor`` argument is left for backward-compatibility with the syntax
``spectral_density(unit, factor)`` but users are encouraged to use
``spectral_density(factor * unit)`` instead.
"""
from .core import UnitBase
if isinstance(wav, UnitBase):
if factor is None:
raise ValueError("If `wav` is specified as a unit, `factor` should be set")
wav = factor * wav # Convert to Quantity
c_Aps = _si.c.to_value(si.AA / si.s) # Angstrom/s
h_cgs = _si.h.cgs.value # erg * s
hc = c_Aps * h_cgs
# flux density
f_la = cgs.erg / si.angstrom / si.cm**2 / si.s
f_nu = cgs.erg / si.Hz / si.cm**2 / si.s
nu_f_nu = cgs.erg / si.cm**2 / si.s
la_f_la = nu_f_nu
phot_f_la = astrophys.photon / (si.cm**2 * si.s * si.AA)
phot_f_nu = astrophys.photon / (si.cm**2 * si.s * si.Hz)
la_phot_f_la = astrophys.photon / (si.cm**2 * si.s)
# luminosity density
L_nu = cgs.erg / si.s / si.Hz
L_la = cgs.erg / si.s / si.angstrom
nu_L_nu = cgs.erg / si.s
la_L_la = nu_L_nu
phot_L_la = astrophys.photon / (si.s * si.AA)
phot_L_nu = astrophys.photon / (si.s * si.Hz)
# surface brightness (flux equiv)
S_la = cgs.erg / si.angstrom / si.cm**2 / si.s / si.sr
S_nu = cgs.erg / si.Hz / si.cm**2 / si.s / si.sr
nu_S_nu = cgs.erg / si.cm**2 / si.s / si.sr
la_S_la = nu_S_nu
phot_S_la = astrophys.photon / (si.cm**2 * si.s * si.AA * si.sr)
phot_S_nu = astrophys.photon / (si.cm**2 * si.s * si.Hz * si.sr)
# surface brightness (luminosity equiv)
SL_nu = cgs.erg / si.s / si.Hz / si.sr
SL_la = cgs.erg / si.s / si.angstrom / si.sr
nu_SL_nu = cgs.erg / si.s / si.sr
la_SL_la = nu_SL_nu
phot_SL_la = astrophys.photon / (si.s * si.AA * si.sr)
phot_SL_nu = astrophys.photon / (si.s * si.Hz * si.sr)
def f_la_to_f_nu(x):
return x * (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)
def f_la_from_f_nu(x):
return x / (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)
def f_nu_to_nu_f_nu(x):
return x * wav.to_value(si.Hz, spectral())
def f_nu_from_nu_f_nu(x):
return x / wav.to_value(si.Hz, spectral())
def f_la_to_la_f_la(x):
return x * wav.to_value(si.AA, spectral())
def f_la_from_la_f_la(x):
return x / wav.to_value(si.AA, spectral())
def phot_f_la_to_f_la(x):
return hc * x / wav.to_value(si.AA, spectral())
def phot_f_la_from_f_la(x):
return x * wav.to_value(si.AA, spectral()) / hc
def phot_f_la_to_f_nu(x):
return h_cgs * x * wav.to_value(si.AA, spectral())
def phot_f_la_from_f_nu(x):
return x / (wav.to_value(si.AA, spectral()) * h_cgs)
def phot_f_la_to_phot_f_nu(x):
return x * wav.to_value(si.AA, spectral()) ** 2 / c_Aps
def phot_f_la_from_phot_f_nu(x):
return c_Aps * x / wav.to_value(si.AA, spectral()) ** 2
phot_f_nu_to_f_nu = phot_f_la_to_f_la
phot_f_nu_from_f_nu = phot_f_la_from_f_la
def phot_f_nu_to_f_la(x):
return x * hc * c_Aps / wav.to_value(si.AA, spectral()) ** 3
def phot_f_nu_from_f_la(x):
return x * wav.to_value(si.AA, spectral()) ** 3 / (hc * c_Aps)
# for luminosity density
L_nu_to_nu_L_nu = f_nu_to_nu_f_nu
L_nu_from_nu_L_nu = f_nu_from_nu_f_nu
L_la_to_la_L_la = f_la_to_la_f_la
L_la_from_la_L_la = f_la_from_la_f_la
phot_L_la_to_L_la = phot_f_la_to_f_la
phot_L_la_from_L_la = phot_f_la_from_f_la
phot_L_la_to_L_nu = phot_f_la_to_f_nu
phot_L_la_from_L_nu = phot_f_la_from_f_nu
phot_L_la_to_phot_L_nu = phot_f_la_to_phot_f_nu
phot_L_la_from_phot_L_nu = phot_f_la_from_phot_f_nu
phot_L_nu_to_L_nu = phot_f_nu_to_f_nu
phot_L_nu_from_L_nu = phot_f_nu_from_f_nu
phot_L_nu_to_L_la = phot_f_nu_to_f_la
phot_L_nu_from_L_la = phot_f_nu_from_f_la
return Equivalency(
[
# flux
(f_la, f_nu, f_la_to_f_nu, f_la_from_f_nu),
(f_nu, nu_f_nu, f_nu_to_nu_f_nu, f_nu_from_nu_f_nu),
(f_la, la_f_la, f_la_to_la_f_la, f_la_from_la_f_la),
(phot_f_la, f_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
(phot_f_la, f_nu, phot_f_la_to_f_nu, phot_f_la_from_f_nu),
(phot_f_la, phot_f_nu, phot_f_la_to_phot_f_nu, phot_f_la_from_phot_f_nu),
(phot_f_nu, f_nu, phot_f_nu_to_f_nu, phot_f_nu_from_f_nu),
(phot_f_nu, f_la, phot_f_nu_to_f_la, phot_f_nu_from_f_la),
# integrated flux
(la_phot_f_la, la_f_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
# luminosity
(L_la, L_nu, f_la_to_f_nu, f_la_from_f_nu),
(L_nu, nu_L_nu, L_nu_to_nu_L_nu, L_nu_from_nu_L_nu),
(L_la, la_L_la, L_la_to_la_L_la, L_la_from_la_L_la),
(phot_L_la, L_la, phot_L_la_to_L_la, phot_L_la_from_L_la),
(phot_L_la, L_nu, phot_L_la_to_L_nu, phot_L_la_from_L_nu),
(phot_L_la, phot_L_nu, phot_L_la_to_phot_L_nu, phot_L_la_from_phot_L_nu),
(phot_L_nu, L_nu, phot_L_nu_to_L_nu, phot_L_nu_from_L_nu),
(phot_L_nu, L_la, phot_L_nu_to_L_la, phot_L_nu_from_L_la),
# surface brightness (flux equiv)
(S_la, S_nu, f_la_to_f_nu, f_la_from_f_nu),
(S_nu, nu_S_nu, f_nu_to_nu_f_nu, f_nu_from_nu_f_nu),
(S_la, la_S_la, f_la_to_la_f_la, f_la_from_la_f_la),
(phot_S_la, S_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
(phot_S_la, S_nu, phot_f_la_to_f_nu, phot_f_la_from_f_nu),
(phot_S_la, phot_S_nu, phot_f_la_to_phot_f_nu, phot_f_la_from_phot_f_nu),
(phot_S_nu, S_nu, phot_f_nu_to_f_nu, phot_f_nu_from_f_nu),
(phot_S_nu, S_la, phot_f_nu_to_f_la, phot_f_nu_from_f_la),
# surface brightness (luminosity equiv)
(SL_la, SL_nu, f_la_to_f_nu, f_la_from_f_nu),
(SL_nu, nu_SL_nu, L_nu_to_nu_L_nu, L_nu_from_nu_L_nu),
(SL_la, la_SL_la, L_la_to_la_L_la, L_la_from_la_L_la),
(phot_SL_la, SL_la, phot_L_la_to_L_la, phot_L_la_from_L_la),
(phot_SL_la, SL_nu, phot_L_la_to_L_nu, phot_L_la_from_L_nu),
(phot_SL_la, phot_SL_nu, phot_L_la_to_phot_L_nu, phot_L_la_from_phot_L_nu),
(phot_SL_nu, SL_nu, phot_L_nu_to_L_nu, phot_L_nu_from_L_nu),
(phot_SL_nu, SL_la, phot_L_nu_to_L_la, phot_L_nu_from_L_la),
],
"spectral_density",
{"wav": wav, "factor": factor},
)
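# Usage sketch (approximate value): converting a flux density per unit
# wavelength to a flux density per unit frequency needs the reference
# wavelength (or frequency) at which the conversion is evaluated.
#
#     >>> import astropy.units as u
#     >>> f_lam = 3.5e-11 * u.erg / u.cm**2 / u.s / u.AA
#     >>> f_lam.to(u.Jy, equivalencies=u.spectral_density(5500 * u.AA))   # ~35.3 Jy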
def doppler_radio(rest):
r"""
Return the equivalency pairs for the radio convention for velocity.
The radio convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0 - f}{f_0} ; f(V) = f_0 ( 1 - V/c )`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> radio_CO_equiv = u.doppler_radio(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> radio_velocity = measured_freq.to(u.km/u.s, equivalencies=radio_CO_equiv)
>>> radio_velocity # doctest: +FLOAT_CMP
<Quantity -31.209092088877583 km / s>
"""
assert_is_spectral_unit(rest)
ckms = _si.c.to_value("km/s")
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return (restfreq - x) / (restfreq) * ckms
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x / ckms
return restfreq * (1 - voverc)
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return (x - restwav) / (x) * ckms
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return restwav * ckms / (ckms - x)
def to_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
return (resten - x) / (resten) * ckms
def from_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
voverc = x / ckms
return resten * (1 - voverc)
return Equivalency(
[
(si.Hz, si.km / si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km / si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km / si.s, to_vel_en, from_vel_en),
],
"doppler_radio",
{"rest": rest},
)
def doppler_optical(rest):
r"""
Return the equivalency pairs for the optical convention for velocity.
The optical convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0 - f}{f } ; f(V) = f_0 ( 1 + V/c )^{-1}`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> optical_CO_equiv = u.doppler_optical(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> optical_velocity = measured_freq.to(u.km/u.s, equivalencies=optical_CO_equiv)
>>> optical_velocity # doctest: +FLOAT_CMP
<Quantity -31.20584348799674 km / s>
"""
assert_is_spectral_unit(rest)
ckms = _si.c.to_value("km/s")
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return ckms * (restfreq - x) / x
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x / ckms
return restfreq / (1 + voverc)
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return ckms * (x / restwav - 1)
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
voverc = x / ckms
return restwav * (1 + voverc)
def to_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
return ckms * (resten - x) / x
def from_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
voverc = x / ckms
return resten / (1 + voverc)
return Equivalency(
[
(si.Hz, si.km / si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km / si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km / si.s, to_vel_en, from_vel_en),
],
"doppler_optical",
{"rest": rest},
)
def doppler_relativistic(rest):
r"""
Return the equivalency pairs for the relativistic convention for velocity.
The full relativistic convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0^2 - f^2}{f_0^2 + f^2} ; f(V) = f_0 \frac{\left(1 - (V/c)^2\right)^{1/2}}{(1+V/c)}`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> relativistic_CO_equiv = u.doppler_relativistic(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> relativistic_velocity = measured_freq.to(u.km/u.s, equivalencies=relativistic_CO_equiv)
>>> relativistic_velocity # doctest: +FLOAT_CMP
<Quantity -31.207467619351537 km / s>
>>> measured_velocity = 1250 * u.km/u.s
>>> relativistic_frequency = measured_velocity.to(u.GHz, equivalencies=relativistic_CO_equiv)
>>> relativistic_frequency # doctest: +FLOAT_CMP
<Quantity 114.79156866993588 GHz>
>>> relativistic_wavelength = measured_velocity.to(u.mm, equivalencies=relativistic_CO_equiv)
>>> relativistic_wavelength # doctest: +FLOAT_CMP
<Quantity 2.6116243681798923 mm>
""" # noqa: E501
assert_is_spectral_unit(rest)
ckms = _si.c.to_value("km/s")
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return (restfreq**2 - x**2) / (restfreq**2 + x**2) * ckms
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x / ckms
return restfreq * ((1 - voverc) / (1 + (voverc))) ** 0.5
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return (x**2 - restwav**2) / (restwav**2 + x**2) * ckms
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
voverc = x / ckms
return restwav * ((1 + voverc) / (1 - voverc)) ** 0.5
def to_vel_en(x):
resten = rest.to_value(si.eV, spectral())
return (resten**2 - x**2) / (resten**2 + x**2) * ckms
def from_vel_en(x):
resten = rest.to_value(si.eV, spectral())
voverc = x / ckms
return resten * ((1 - voverc) / (1 + (voverc))) ** 0.5
return Equivalency(
[
(si.Hz, si.km / si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km / si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km / si.s, to_vel_en, from_vel_en),
],
"doppler_relativistic",
{"rest": rest},
)
def doppler_redshift():
"""
Returns the equivalence between Doppler redshift (unitless) and radial velocity.
.. note::
This equivalency is not compatible with cosmological
redshift in `astropy.cosmology.units`.
"""
rv_unit = si.km / si.s
C_KMS = _si.c.to_value(rv_unit)
def convert_z_to_rv(z):
zponesq = (1 + z) ** 2
return C_KMS * (zponesq - 1) / (zponesq + 1)
def convert_rv_to_z(rv):
beta = rv / C_KMS
return np.sqrt((1 + beta) / (1 - beta)) - 1
return Equivalency(
[(dimensionless_unscaled, rv_unit, convert_z_to_rv, convert_rv_to_z)],
"doppler_redshift",
)
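# Usage sketch (approximate value): a dimensionless redshift converts to the
# corresponding relativistic radial velocity.
#
#     >>> import astropy.units as u
#     >>> z = 0.1 * u.dimensionless_unscaled
#     >>> z.to(u.km / u.s, equivalencies=u.doppler_redshift())   # ~28487 km/s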
def molar_mass_amu():
"""
Returns the equivalence between amu and molar mass.
"""
return Equivalency([(si.g / si.mol, misc.u)], "molar_mass_amu")
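# Usage sketch: a molar mass in g/mol maps one-to-one onto atomic mass units.
#
#     >>> import astropy.units as u
#     >>> (12 * u.g / u.mol).to(u.u, equivalencies=u.molar_mass_amu())   # -> 12 u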
def mass_energy():
"""
Returns a list of equivalence pairs that handle the conversion
between mass and energy.
"""
c2 = _si.c.value**2
return Equivalency(
[
(si.kg, si.J, lambda x: x * c2, lambda x: x / c2),
(si.kg / si.m**2, si.J / si.m**2, lambda x: x * c2, lambda x: x / c2),
(si.kg / si.m**3, si.J / si.m**3, lambda x: x * c2, lambda x: x / c2),
(si.kg / si.s, si.J / si.s, lambda x: x * c2, lambda x: x / c2),
],
"mass_energy",
)
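# Usage sketch (approximate value): rest mass converts to energy via E = m c**2.
#
#     >>> import astropy.units as u
#     >>> (1 * u.kg).to(u.J, equivalencies=u.mass_energy())   # ~8.988e16 J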
def brightness_temperature(frequency, beam_area=None):
r"""
Defines the conversion between Jy/sr and "brightness temperature",
:math:`T_B`, in Kelvins. The brightness temperature is a unit very
commonly used in radio astronomy. See, e.g., "Tools of Radio Astronomy"
(Wilson 2009) eqn 8.16 and eqn 8.19 (these pages are available on `google
books
<https://books.google.com/books?id=9KHw6R8rQEMC&pg=PA179&source=gbs_toc_r&cad=4#v=onepage&q&f=false>`__).
:math:`T_B \equiv S_\nu / \left(2 k \nu^2 / c^2 \right)`
If the input is in Jy/beam or Jy (assuming it came from a single beam), the
beam area is essential for this computation: the brightness temperature is
inversely proportional to the beam area.
Parameters
----------
frequency : `~astropy.units.Quantity`
The observed ``spectral`` equivalent `~astropy.units.Unit` (e.g.,
frequency or wavelength). The variable is named 'frequency' because it
is more commonly used in radio astronomy.
BACKWARD COMPATIBILITY NOTE: previous versions of the brightness
temperature equivalency used the keyword ``disp``, which is no longer
supported.
beam_area : `~astropy.units.Quantity` ['solid angle']
Beam area in angular units, i.e. steradian equivalent
Examples
--------
Arecibo C-band beam::
>>> import numpy as np
>>> from astropy import units as u
>>> beam_sigma = 50*u.arcsec
>>> beam_area = 2*np.pi*(beam_sigma)**2
>>> freq = 5*u.GHz
>>> equiv = u.brightness_temperature(freq)
>>> (1*u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 3.526295144567176 K>
VLA synthetic beam::
>>> bmaj = 15*u.arcsec
>>> bmin = 15*u.arcsec
>>> fwhm_to_sigma = 1./(8*np.log(2))**0.5
>>> beam_area = 2.*np.pi*(bmaj*bmin*fwhm_to_sigma**2)
>>> freq = 5*u.GHz
>>> equiv = u.brightness_temperature(freq)
>>> (u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 217.2658703625732 K>
Any generic surface brightness:
>>> surf_brightness = 1e6*u.MJy/u.sr
>>> surf_brightness.to(u.K, equivalencies=u.brightness_temperature(500*u.GHz)) # doctest: +FLOAT_CMP
<Quantity 130.1931904778803 K>
""" # noqa: E501
if frequency.unit.is_equivalent(si.sr):
if not beam_area.unit.is_equivalent(si.Hz):
raise ValueError(
"The inputs to `brightness_temperature` are frequency and angular area."
)
warnings.warn(
"The inputs to `brightness_temperature` have changed. "
"Frequency is now the first input, and angular area "
"is the second, optional input.",
AstropyDeprecationWarning,
)
frequency, beam_area = beam_area, frequency
nu = frequency.to(si.GHz, spectral())
factor_Jy = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value
factor_K = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value
if beam_area is not None:
beam = beam_area.to_value(si.sr)
def convert_Jy_to_K(x_jybm):
return x_jybm / beam / factor_Jy
def convert_K_to_Jy(x_K):
return x_K * beam / factor_K
return Equivalency(
[
(astrophys.Jy, si.K, convert_Jy_to_K, convert_K_to_Jy),
(astrophys.Jy / astrophys.beam, si.K, convert_Jy_to_K, convert_K_to_Jy),
],
"brightness_temperature",
{"frequency": frequency, "beam_area": beam_area},
)
else:
def convert_JySr_to_K(x_jysr):
return x_jysr / factor_Jy
def convert_K_to_JySr(x_K):
return x_K / factor_K # multiplied by 1x for 1 steradian
return Equivalency(
[(astrophys.Jy / si.sr, si.K, convert_JySr_to_K, convert_K_to_JySr)],
"brightness_temperature",
{"frequency": frequency, "beam_area": beam_area},
)
def beam_angular_area(beam_area):
"""
Convert between the ``beam`` unit, which is commonly used to express the area
of a radio telescope resolution element, and an area on the sky.
This equivalency also supports direct conversion between ``Jy/beam`` and
``Jy/steradian`` units, since that is a common operation.
Parameters
----------
beam_area : unit-like
The area of the beam in angular area units (e.g., steradians)
Must have angular area equivalent units.
"""
return Equivalency(
[
(astrophys.beam, Unit(beam_area)),
(astrophys.beam**-1, Unit(beam_area) ** -1),
(astrophys.Jy / astrophys.beam, astrophys.Jy / Unit(beam_area)),
],
"beam_angular_area",
{"beam_area": beam_area},
)
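# Usage sketch (illustrative beam size): once the beam solid angle is known,
# Jy/beam converts directly to Jy/sr.
#
#     >>> import astropy.units as u
#     >>> equiv = u.beam_angular_area(1e-4 * u.sr)
#     >>> (1 * u.Jy / u.beam).to(u.Jy / u.sr, equivalencies=equiv)   # -> 10000 Jy/sr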
def thermodynamic_temperature(frequency, T_cmb=None):
r"""Defines the conversion between Jy/sr and "thermodynamic temperature",
:math:`T_{CMB}`, in Kelvins. The thermodynamic temperature is a unit very
commonly used in cosmology. See eqn 8 in [1]
:math:`K_{CMB} \equiv I_\nu / \left(2 k \nu^2 / c^2 f(\nu) \right)`
with :math:`f(\nu) = \frac{ x^2 e^x}{(e^x - 1 )^2}`
where :math:`x = h \nu / k T`
Parameters
----------
frequency : `~astropy.units.Quantity`
The observed `spectral` equivalent `~astropy.units.Unit` (e.g.,
frequency or wavelength). Must have spectral units.
T_cmb : `~astropy.units.Quantity` ['temperature'] or None
The CMB temperature at z=0. If `None`, the default cosmology will be
used to get this temperature. Must have units of temperature.
Notes
-----
For broad band receivers this conversion does not hold,
as it depends strongly on the frequency.
References
----------
.. [1] Planck 2013 results. IX. HFI spectral response
https://arxiv.org/abs/1303.5070
Examples
--------
Planck HFI 143 GHz::
>>> from astropy import units as u
>>> from astropy.cosmology import Planck15
>>> freq = 143 * u.GHz
>>> equiv = u.thermodynamic_temperature(freq, Planck15.Tcmb0)
>>> (1. * u.mK).to(u.MJy / u.sr, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 0.37993172 MJy / sr>
"""
nu = frequency.to(si.GHz, spectral())
if T_cmb is None:
from astropy.cosmology import default_cosmology
T_cmb = default_cosmology.get().Tcmb0
def f(nu, T_cmb=T_cmb):
x = _si.h * nu / _si.k_B / T_cmb
return x**2 * np.exp(x) / np.expm1(x) ** 2
def convert_Jy_to_K(x_jybm):
factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(
astrophys.Jy
)
return x_jybm / factor
def convert_K_to_Jy(x_K):
factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(
si.K
)
return x_K / factor
return Equivalency(
[(astrophys.Jy / si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],
"thermodynamic_temperature",
{"frequency": frequency, "T_cmb": T_cmb},
)
def temperature():
"""Convert between Kelvin, Celsius, Rankine and Fahrenheit here because
Unit and CompositeUnit cannot do addition or subtraction properly.
"""
from .imperial import deg_F as F
from .imperial import deg_R as R
K = si.K
C = si.deg_C
return Equivalency(
[
(K, C, lambda x: x - 273.15, lambda x: x + 273.15),
(C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),
(K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8),
(R, F, lambda x: x - 459.67, lambda x: x + 459.67),
(R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67),
(R, K, lambda x: x * (5 / 9), lambda x: x * 1.8),
],
"temperature",
)
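# Usage sketch: converting between the temperature scales paired above.
#
#     >>> import astropy.units as u
#     >>> (0 * u.deg_C).to(u.K, equivalencies=u.temperature())   # -> 273.15 K
#     >>> (100 * u.deg_C).to(u.imperial.deg_F, equivalencies=u.temperature())   # -> 212 deg_F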
def temperature_energy():
"""Convert between Kelvin and keV(eV) to an equivalent amount."""
e = _si.e.value
k_B = _si.k_B.value
return Equivalency(
[(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],
"temperature_energy",
)
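# Usage sketch (approximate value): a thermal temperature expressed as the
# equivalent energy kT.
#
#     >>> import astropy.units as u
#     >>> (1e7 * u.K).to(u.keV, equivalencies=u.temperature_energy())   # ~0.862 keV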
def assert_is_spectral_unit(value):
try:
value.to(si.Hz, spectral())
except (AttributeError, UnitsError) as ex:
raise UnitsError(
"The 'rest' value must be a spectral equivalent "
"(frequency, wavelength, or energy)."
)
def pixel_scale(pixscale):
"""
Convert between pixel distances (in units of ``pix``) and other units,
given a particular ``pixscale``.
Parameters
----------
pixscale : `~astropy.units.Quantity`
The pixel scale either in units of <unit>/pixel or pixel/<unit>.
"""
decomposed = pixscale.unit.decompose()
dimensions = dict(zip(decomposed.bases, decomposed.powers))
pix_power = dimensions.get(misc.pix, 0)
if pix_power == -1:
physical_unit = Unit(pixscale * misc.pix)
elif pix_power == 1:
physical_unit = Unit(misc.pix / pixscale)
else:
raise UnitsError(
"The pixel scale unit must have pixel dimensionality of 1 or -1."
)
return Equivalency(
[(misc.pix, physical_unit)], "pixel_scale", {"pixscale": pixscale}
)
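# Usage sketch (illustrative scale): with a pixel scale of 0.2 arcsec/pixel,
# pixel counts convert to on-sky angles and back.
#
#     >>> import astropy.units as u
#     >>> pixscale = u.pixel_scale(0.2 * u.arcsec / u.pix)
#     >>> (100 * u.pix).to(u.arcsec, equivalencies=pixscale)   # -> 20 arcsec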
def plate_scale(platescale):
"""
Convert between lengths (to be interpreted as lengths in the focal plane)
and angular units with a specified ``platescale``.
Parameters
----------
platescale : `~astropy.units.Quantity`
The plate scale, either in units of distance/angle or angle/distance.
"""
if platescale.unit.is_equivalent(si.arcsec / si.m):
platescale_val = platescale.to_value(si.radian / si.m)
elif platescale.unit.is_equivalent(si.m / si.arcsec):
platescale_val = (1 / platescale).to_value(si.radian / si.m)
else:
raise UnitsError("The pixel scale must be in angle/distance or distance/angle")
return Equivalency(
[(si.m, si.radian, lambda d: d * platescale_val, lambda a: a / platescale_val)],
"plate_scale",
{"platescale": platescale},
)
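# Usage sketch (illustrative scale): with a plate scale of 20 micron/arcsec, a
# length in the focal plane converts to an angle on the sky.
#
#     >>> import astropy.units as u
#     >>> platescale = u.plate_scale(20 * u.um / u.arcsec)
#     >>> (1 * u.mm).to(u.arcsec, equivalencies=platescale)   # -> 50 arcsec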
# -------------------------------------------------------------------------
def __getattr__(attr):
if attr == "with_H0":
import warnings
from astropy.cosmology.units import with_H0
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"`with_H0` is deprecated from `astropy.units.equivalencies` "
"since astropy 5.0 and may be removed in a future version. "
"Use `astropy.cosmology.units.with_H0` instead.",
AstropyDeprecationWarning,
)
return with_H0
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
4b12e7711b687b65538b89c1ee740c0db5b779d6430720f92f59f1523f1ac18f | import copy
import operator
import re
import warnings
import erfa
import numpy as np
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.table import QTable
from astropy.time import Time
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle
from .baseframe import BaseCoordinateFrame, GenericFrame, frame_transform_graph
from .distances import Distance
from .representation import (
RadialDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from .sky_coordinate_parsers import (
_get_frame_class,
_get_frame_without_data,
_parse_coordinate_data,
)
__all__ = ["SkyCoord", "SkyCoordInfo"]
class SkyCoordInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {"unit"} # Unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
repr_data = val.info._repr_data
formats = ["{0." + compname + ".value:}" for compname in repr_data.components]
return ",".join(formats).format(repr_data)
@property
def unit(self):
repr_data = self._repr_data
unit = ",".join(
str(getattr(repr_data, comp).unit) or "None"
for comp in repr_data.components
)
return unit
@property
def _repr_data(self):
if self._parent is None:
return None
sc = self._parent
if issubclass(sc.representation_type, SphericalRepresentation) and isinstance(
sc.data, UnitSphericalRepresentation
):
repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)
else:
repr_data = sc.represent_as(sc.representation_type, in_frame_units=True)
return repr_data
def _represent_as_dict(self):
sc = self._parent
attrs = list(sc.representation_component_names)
# Don't output distance unless it's actually distance.
if isinstance(sc.data, UnitSphericalRepresentation):
attrs = attrs[:-1]
diff = sc.data.differentials.get("s")
if diff is not None:
diff_attrs = list(sc.get_representation_component_names("s"))
# Don't output proper motions if they haven't been specified.
if isinstance(diff, RadialDifferential):
diff_attrs = diff_attrs[2:]
# Don't output radial velocity unless it's actually velocity.
elif isinstance(
diff, (UnitSphericalDifferential, UnitSphericalCosLatDifferential)
):
diff_attrs = diff_attrs[:-1]
attrs.extend(diff_attrs)
attrs.extend(frame_transform_graph.frame_attributes.keys())
out = super()._represent_as_dict(attrs)
out["representation_type"] = sc.representation_type.get_name()
out["frame"] = sc.frame.name
# Note that sc.info.unit is a fake composite unit (e.g. 'deg,deg,None'
# or None,None,m) and is not stored. The individual attributes have
# units.
return out
def new_like(self, skycoords, length, metadata_conflicts="warn", name=None):
"""
Return a new SkyCoord instance which is consistent with the input
SkyCoord objects ``skycoords`` and has ``length`` rows. Being
"consistent" is defined as being able to set an item from one to each of
the rest without any exception being raised.
This is intended for creating a new SkyCoord instance whose elements can
be set in-place for table operations like join or vstack. This is used
when a SkyCoord object is used as a mixin column in an astropy Table.
The data values are not predictable and it is expected that the consumer
of the object will fill in all values.
Parameters
----------
skycoords : list
List of input SkyCoord objects
length : int
Length of the output skycoord object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output name (sets output skycoord.info.name)
Returns
-------
skycoord : SkyCoord (or subclass)
Instance of this class consistent with ``skycoords``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
skycoords, metadata_conflicts, name, ("meta", "description")
)
skycoord0 = skycoords[0]
# Make a new SkyCoord object with the desired length and attributes
# by using the _apply / __getitem__ machinery to effectively return
# skycoord0[[0, 0, ..., 0, 0]]. This will have all the right frame
# attributes with the right shape.
indexes = np.zeros(length, dtype=np.int64)
out = skycoord0[indexes]
# Use __setitem__ machinery to check for consistency of all skycoords
for skycoord in skycoords[1:]:
try:
out[0] = skycoord[0]
except Exception as err:
raise ValueError("Input skycoords are inconsistent.") from err
# Set (merged) info attributes
for attr in ("name", "meta", "description"):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
class SkyCoord(ShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The `SkyCoord` class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
See also: https://docs.astropy.org/en/stable/coordinates/
Examples
--------
The examples below illustrate common ways of initializing a `SkyCoord`
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +31:12:43", "1 12 43.2 +31 12 43"]
>>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic) # Units from string
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias. The frame classes that are built in
to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and `Galactic`.
The string aliases are simply lower-case versions of the class name, and
allow for creating a `SkyCoord` object and transforming frames without
explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this `SkyCoord` should represent. Defaults
to ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied coordinate values.
If only one unit is supplied then it applies to all values.
Note that passing only one unit might lead to unit conversion errors
if the coordinate values are expected to have mixed physical meanings
(e.g., angles and distances).
obstime : time-like, optional
Time(s) of observation.
equinox : time-like, optional
Coordinate frame equinox time.
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : angle-like, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including `ICRS`,
`FK5`, `FK4`, and `FK4NoETerms`.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components, in angle per time units.
l, b : angle-like, optional
Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the `Galactic` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components in the `Galactic` frame, in angle per time
units.
x, y, z : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinate values.
u, v, w : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinate values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
def __init__(self, *args, copy=True, **kwargs):
# these are frame attributes set on this SkyCoord but *not* a part of
# the frame object this SkyCoord contains
self._extra_frameattr_names = set()
# If all that is passed in is a frame instance that already has data,
# we should bypass all of the parsing and logic below. This is here
# to make this the fastest way to create a SkyCoord instance. Many of
# the classmethods implemented for performance enhancements will use
# this as the initialization path
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))
):
coords = args[0]
if isinstance(coords, SkyCoord):
self._extra_frameattr_names = coords._extra_frameattr_names
self.info = coords.info
# Copy over any extra frame attributes
for attr_name in self._extra_frameattr_names:
# Setting it will also validate it.
setattr(self, attr_name, getattr(coords, attr_name))
coords = coords.frame
if not coords.has_data:
raise ValueError(
"Cannot initialize from a coordinate frame "
"instance without coordinate data"
)
if copy:
self._sky_coord_frame = coords.copy()
else:
self._sky_coord_frame = coords
else:
# Get the frame instance without coordinate data but with all frame
# attributes set - these could either have been passed in with the
# frame as an instance, or passed in as kwargs here
frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)
# Parse the args and kwargs to assemble a sanitized and validated
# kwargs dict for initializing attributes for this object and for
# creating the internal self._sky_coord_frame object
args = list(args) # Make it mutable
skycoord_kwargs, components, info = _parse_coordinate_data(
frame_cls(**frame_kwargs), args, kwargs
)
# In the above two parsing functions, these kwargs were identified
# as valid frame attributes for *some* frame, but not the frame that
# this SkyCoord will have. We keep these attributes as special
# skycoord frame attributes:
for attr in skycoord_kwargs:
# Setting it will also validate it.
setattr(self, attr, skycoord_kwargs[attr])
if info is not None:
self.info = info
# Finally make the internal coordinate object.
frame_kwargs.update(components)
self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)
if not self._sky_coord_frame.has_data:
raise ValueError("Cannot create a SkyCoord without data")
@property
def frame(self):
return self._sky_coord_frame
@property
def representation_type(self):
return self.frame.representation_type
@representation_type.setter
def representation_type(self, value):
self.frame.representation_type = value
# TODO: remove these in future
@property
def representation(self):
return self.frame.representation
@representation.setter
def representation(self, value):
self.frame.representation = value
@property
def shape(self):
return self.frame.shape
def __eq__(self, value):
"""Equality operator for SkyCoord
This implements strict equality and requires that the frames are
equivalent, extra frame attributes are equivalent, and that the
representation data are exactly equal.
"""
if isinstance(value, BaseCoordinateFrame):
if value._data is None:
raise ValueError("Can only compare SkyCoord to Frame with data")
return self.frame == value
if not isinstance(value, SkyCoord):
return NotImplemented
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(
getattr(self, attr), getattr(value, attr)
):
raise ValueError(
f"cannot compare: extra frame attribute '{attr}' is not equivalent"
" (perhaps compare the frames directly to avoid this exception)"
)
return self._sky_coord_frame == value._sky_coord_frame
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
# create a new but empty instance, and copy over stuff
new = super().__new__(self.__class__)
new._sky_coord_frame = self._sky_coord_frame._apply(method, *args, **kwargs)
new._extra_frameattr_names = self._extra_frameattr_names.copy()
for attr in self._extra_frameattr_names:
value = getattr(self, attr)
if getattr(value, "shape", ()):
value = apply_method(value)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, "_" + attr, value)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
"""Implement self[item] = value for SkyCoord
The right hand ``value`` must be strictly consistent with self:
- Identical class
- Equivalent frames
- Identical representation_types
- Identical representation differentials keys
- Identical frame attributes
- Identical "extra" frame attributes (e.g. obstime for an ICRS coord)
With these caveats the setitem ends up as effectively a setitem on
the representation data.
self.frame.data[item] = value.frame.data
"""
if self.__class__ is not value.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(
getattr(self, attr), getattr(value, attr)
):
raise ValueError(f"attribute {attr} is not equivalent")
# Set the frame values. This checks frame equivalence and also clears
# the cache to ensure that the object is not in an inconsistent state.
self._sky_coord_frame[item] = value._sky_coord_frame
def insert(self, obj, values, axis=0):
"""
Insert coordinate values before the given indices in the object and
return a new SkyCoord object.
The values to be inserted must conform to the rules for in-place setting
of ``SkyCoord`` objects.
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple insertion before the index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. These must be consistent with this ``SkyCoord``
(see the rules for in-place setting described above).
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.coordinates.SkyCoord` instance
New coordinate object with inserted value(s)
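Examples
--------
A minimal illustration; the printed output is indicative and its exact
formatting may vary between numpy versions::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> sc = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
>>> sc.insert(1, SkyCoord(10*u.deg, 20*u.deg))  # doctest: +SKIP
<SkyCoord (ICRS): (ra, dec) in deg
    [( 1.,  3.), (10., 20.), ( 2.,  4.)]>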
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
# Set the output values. This is where validation of `values` takes place to ensure
# that it can indeed be inserted.
out[:idx0] = self[:idx0]
out[idx0 : idx0 + n_values] = values
out[idx0 + n_values :] = self[idx0:]
return out
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : frame class, frame object, or str
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
# TODO! like matplotlib, do string overrides for modified methods
new_frame = (
_get_frame_class(new_frame) if isinstance(new_frame, str) else new_frame
)
return self.frame.is_transformable_to(new_frame)
def transform_to(self, frame, merge_attributes=True):
"""Transform this coordinate to a new frame.
The precise frame transformed to depends on ``merge_attributes``.
If `False`, the destination frame is used exactly as passed in.
But this is often not quite what one wants. E.g., suppose one wants to
transform an ICRS coordinate that has an obstime attribute to FK4; in
this case, one likely would want to use this information. Thus, the
default for ``merge_attributes`` is `True`, in which case the precedence is
as follows: (1) explicitly set (i.e., non-default) values in the
destination frame; (2) explicitly set values in the source; (3) default
value in the destination frame.
Note that in either case, any explicitly set attributes on the source
`SkyCoord` that are not part of the destination frame's definition are
kept (stored on the resulting `SkyCoord`), and thus one can round-trip
(e.g., from FK4 to ICRS to FK4 without losing obstime).
Parameters
----------
frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance
The frame to transform this coordinate into. If a `SkyCoord`, the
underlying frame is extracted, and all other information ignored.
merge_attributes : bool, optional
Whether the default attributes in the destination frame are allowed
to be overridden by explicitly set attributes in the source
(see note above; default: `True`).
Returns
-------
coord : `SkyCoord`
A new object with this coordinate represented in the `frame` frame.
Raises
------
ValueError
If there is no possible transformation route.
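Examples
--------
A minimal illustration of transforming to another frame by its string alias::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> c = SkyCoord(ra=10*u.deg, dec=20*u.deg, frame='icrs')
>>> c_gal = c.transform_to('galactic')
>>> c_gal.frame.name
'galactic'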
"""
from astropy.coordinates.errors import ConvertError
frame_kwargs = {}
# Frame name (string) or frame class? Coerce into an instance.
try:
frame = _get_frame_class(frame)()
except Exception:
pass
if isinstance(frame, SkyCoord):
frame = frame.frame # Change to underlying coord frame instance
if isinstance(frame, BaseCoordinateFrame):
new_frame_cls = frame.__class__
# Get frame attributes, allowing defaults to be overridden by
# explicitly set attributes of the source if ``merge_attributes``.
for attr in frame_transform_graph.frame_attributes:
self_val = getattr(self, attr, None)
frame_val = getattr(frame, attr, None)
if frame_val is not None and not (
merge_attributes and frame.is_frame_attr_default(attr)
):
frame_kwargs[attr] = frame_val
elif self_val is not None and not self.is_frame_attr_default(attr):
frame_kwargs[attr] = self_val
elif frame_val is not None:
frame_kwargs[attr] = frame_val
else:
raise ValueError(
"Transform `frame` must be a frame name, class, or instance"
)
# Get the composite transform to the new frame
trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
if trans is None:
raise ConvertError(
f"Cannot transform from {self.frame.__class__} to {new_frame_cls}"
)
# Make a generic frame which will accept all the frame kwargs that
# are provided and allow for transforming through intermediate frames
# which may require one or more of those kwargs.
generic_frame = GenericFrame(frame_kwargs)
# Do the transformation, returning a coordinate frame of the desired
# final type (not generic).
new_coord = trans(self.frame, generic_frame)
# Finally make the new SkyCoord object from the `new_coord` and
# remaining frame_kwargs that are not frame_attributes in `new_coord`.
for attr in set(new_coord.frame_attributes) & set(frame_kwargs.keys()):
frame_kwargs.pop(attr)
# Always remove the origin frame attribute, as that attribute only makes
# sense with a SkyOffsetFrame (in which case it will be stored on the frame).
# See gh-11277.
# TODO: Should it be a property of the frame attribute that it can
# or cannot be stored on a SkyCoord?
frame_kwargs.pop("origin", None)
return self.__class__(new_coord, **frame_kwargs)
def apply_space_motion(self, new_obstime=None, dt=None):
"""
Compute the position of the source represented by this coordinate object
at a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation."
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : `SkyCoord`
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
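Examples
--------
A minimal sketch of evolving a position with proper motions; the resulting
coordinate is not printed here::
>>> from astropy.coordinates import SkyCoord
>>> from astropy.time import Time
>>> import astropy.units as u
>>> c = SkyCoord(ra=10*u.deg, dec=20*u.deg,
...              pm_ra_cosdec=50*u.mas/u.yr, pm_dec=-30*u.mas/u.yr,
...              obstime=Time('2000-01-01'))
>>> c_2020 = c.apply_space_motion(new_obstime=Time('2020-01-01'))  # doctest: +SKIP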
"""
from .builtin_frames.icrs import ICRS
if (new_obstime is None) == (dt is None):
raise ValueError(
"You must specify one of `new_obstime` or `dt`, but not both."
)
# Validate that we have velocity info
if "s" not in self.frame.data.differentials:
raise ValueError("SkyCoord requires velocity data to evolve the position.")
if "obstime" in self.frame.frame_attributes:
raise NotImplementedError(
"Updating the coordinates in a frame with explicit time dependence is"
" currently not supported. If you would like this functionality, please"
" open an issue on github:\nhttps://github.com/astropy/astropy"
)
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
# position / velocity were measured initially
raise ValueError(
"This object has no associated `obstime`. apply_space_motion() must"
" receive a time difference, `dt`, and not a new obstime."
)
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time("J2000")
new_obstime = None # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials["s"]
parallax_zero = False
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by convention
plx = 0.0
parallax_zero = True
try:
rv = icrsvel.d_distance.to_value(u.km / u.s)
except u.UnitConversionError: # No RV
rv = 0.0
starpm = erfa.pmsafe(
icrsrep.lon.radian,
icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian / u.yr),
icrsvel.d_lat.to_value(u.radian / u.yr),
plx,
rv,
t1.jd1,
t1.jd2,
t2.jd1,
t2.jd2,
)
if parallax_zero:
new_distance = None
else:
new_distance = Distance(parallax=starpm[4] << u.arcsec)
icrs2 = ICRS(
ra=u.Quantity(starpm[0], u.radian, copy=False),
dec=u.Quantity(starpm[1], u.radian, copy=False),
pm_ra=u.Quantity(starpm[2], u.radian / u.yr, copy=False),
pm_dec=u.Quantity(starpm[3], u.radian / u.yr, copy=False),
distance=new_distance,
radial_velocity=u.Quantity(starpm[5], u.km / u.s, copy=False),
differential_type=SphericalDifferential,
)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {
attrnm: getattr(self, attrnm) for attrnm in self._extra_frameattr_names
}
frattrs["obstime"] = new_obstime
result = self.__class__(icrs2, **frattrs).transform_to(self.frame)
# Without this the output might not have the right differential type.
# Not sure if this fixes the problem or just hides it. See #11932
result.differential_type = self.differential_type
return result
def _is_name(self, string):
"""
Returns whether a string is one of the aliases for the frame.
"""
return self.frame.name == string or (
isinstance(self.frame.name, list) and string in self.frame.name
)
def __getattr__(self, attr):
"""
Overrides getattr to return coordinates that this can be transformed
to, based on the alias attr in the primary transform graph.
"""
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
return self # Should this be a deepcopy of self?
# Anything in the set of all possible frame_attr_names is handled
# here. If the attr is relevant for the current frame then delegate
# to self.frame otherwise get it from self._<attr>.
if attr in frame_transform_graph.frame_attributes:
if attr in self.frame.frame_attributes:
return getattr(self.frame, attr)
else:
return getattr(self, "_" + attr, None)
# Some attributes might not fall in the above category but still
# are available through self._sky_coord_frame.
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
return getattr(self._sky_coord_frame, attr)
# Try to interpret as a new frame for transforming.
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
return self.transform_to(attr)
# Fail
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{attr}'"
)
def __setattr__(self, attr, val):
# This is to make anything available through __getattr__ immutable
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
setattr(self._sky_coord_frame, attr, val)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be set, but only via a private
# variable. See __getattr__ above.
super().__setattr__("_" + attr, val)
# Validate it
frame_transform_graph.frame_attributes[attr].__get__(self)
# And add to set of extra attributes
self._extra_frameattr_names |= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__setattr__(attr, val)
def __delattr__(self, attr):
# mirror __setattr__ above
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
delattr(self._sky_coord_frame, attr)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be deleted, but need to remove
# the corresponding private variable. See __getattr__ above.
super().__delattr__("_" + attr)
# Also remove it from the set of extra attributes
self._extra_frameattr_names -= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__delattr__(attr)
def __dir__(self):
"""
Override the builtin `dir` behavior to include:
- Transforms available by aliases
- Attributes / methods of the underlying self.frame object
"""
dir_values = set(super().__dir__())
# determine the aliases that this can be transformed to.
for name in frame_transform_graph.get_names():
frame_cls = frame_transform_graph.lookup_name(name)
if self.frame.is_transformable_to(frame_cls):
dir_values.add(name)
# Add public attributes of self.frame
dir_values.update(
{attr for attr in dir(self.frame) if not attr.startswith("_")}
)
# Add all possible frame attributes
dir_values.update(frame_transform_graph.frame_attributes.keys())
return sorted(dir_values)
def __repr__(self):
clsnm = self.__class__.__name__
coonm = self.frame.__class__.__name__
frameattrs = self.frame._frame_attrs_repr()
if frameattrs:
frameattrs = ": " + frameattrs
data = self.frame._data_repr()
if data:
data = ": " + data
return f"<{clsnm} ({coonm}{frameattrs}){data}>"
def to_string(self, style="decimal", **kwargs):
"""
A string representation of the coordinates.
The default style definitions are::
'decimal': 'lat': {'decimal': True, 'unit': "deg"}
'lon': {'decimal': True, 'unit': "deg"}
'dms': 'lat': {'unit': "deg"}
'lon': {'unit': "deg"}
'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
'lon': {'pad': True, 'unit': "hour"}
See :meth:`~astropy.coordinates.Angle.to_string` for details and
keyword arguments (the two angles forming the coordinates are
both :class:`~astropy.coordinates.Angle` instances). Keyword
arguments have precedence over the style defaults and are passed
to :meth:`~astropy.coordinates.Angle.to_string`.
Parameters
----------
style : {'hmsdms', 'dms', 'decimal'}
The formatting specification to use. These encode the three most
common ways to represent coordinates. The default is `decimal`.
**kwargs
Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
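Examples
--------
A minimal illustration (the exact string formatting may vary with the
default precision)::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> c = SkyCoord(10.5*u.deg, -20.25*u.deg)
>>> c.to_string('hmsdms')  # doctest: +SKIP
'00h42m00s -20d15m00s'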
"""
sph_coord = self.frame.represent_as(SphericalRepresentation)
styles = {
"hmsdms": {
"lonargs": {"unit": u.hour, "pad": True},
"latargs": {"unit": u.degree, "pad": True, "alwayssign": True},
},
"dms": {"lonargs": {"unit": u.degree}, "latargs": {"unit": u.degree}},
"decimal": {
"lonargs": {"unit": u.degree, "decimal": True},
"latargs": {"unit": u.degree, "decimal": True},
},
}
lonargs = {}
latargs = {}
if style in styles:
lonargs.update(styles[style]["lonargs"])
latargs.update(styles[style]["latargs"])
else:
raise ValueError(f"Invalid style. Valid options are: {','.join(styles)}")
lonargs.update(kwargs)
latargs.update(kwargs)
if np.isscalar(sph_coord.lon.value):
coord_string = (
f"{sph_coord.lon.to_string(**lonargs)}"
f" {sph_coord.lat.to_string(**latargs)}"
)
else:
coord_string = []
for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
coord_string += [
f"{lonangle.to_string(**lonargs)} {latangle.to_string(**latargs)}"
]
if len(sph_coord.shape) > 1:
coord_string = np.array(coord_string).reshape(sph_coord.shape)
return coord_string
def to_table(self):
"""
Convert this |SkyCoord| to a |QTable|.
Any attributes that have the same length as the |SkyCoord| will be
converted to columns of the |QTable|. All other attributes will be
recorded as metadata.
Returns
-------
`~astropy.table.QTable`
A |QTable| containing the data of this |SkyCoord|.
Examples
--------
>>> sc = SkyCoord(ra=[40, 70]*u.deg, dec=[0, -20]*u.deg,
... obstime=Time([2000, 2010], format='jyear'))
>>> t = sc.to_table()
>>> t
<QTable length=2>
ra dec obstime
deg deg
float64 float64 Time
------- ------- -------
40.0 0.0 2000.0
70.0 -20.0 2010.0
>>> t.meta
{'representation_type': 'spherical', 'frame': 'icrs'}
"""
self_as_dict = self.info._represent_as_dict()
tabledata = {}
metadata = {}
# Record attributes that have the same length as self as columns in the
# table, and the other attributes as table metadata. This matches
# table.serialize._represent_mixin_as_column().
for key, value in self_as_dict.items():
if getattr(value, "shape", ())[:1] == (len(self),):
tabledata[key] = value
else:
metadata[key] = value
return QTable(tabledata, meta=metadata)
def is_equivalent_frame(self, other):
"""
Checks if this object's frame is the same as that of the ``other``
object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. For two `SkyCoord` objects, *all* of the
frame attributes have to match, not just those relevant for the object's
frame.
Parameters
----------
other : SkyCoord or BaseCoordinateFrame
The other object to check.
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass.
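Examples
--------
A minimal illustration::
>>> from astropy.coordinates import SkyCoord, FK5
>>> import astropy.units as u
>>> c1 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5')
>>> c1.is_equivalent_frame(SkyCoord(3*u.deg, 4*u.deg, frame='fk5'))
True
>>> c1.is_equivalent_frame(SkyCoord(3*u.deg, 4*u.deg, frame=FK5(equinox='J1975')))
False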
"""
if isinstance(other, BaseCoordinateFrame):
return self.frame.is_equivalent_frame(other)
elif isinstance(other, SkyCoord):
if other.frame.name != self.frame.name:
return False
for fattrnm in frame_transform_graph.frame_attributes:
if not BaseCoordinateFrame._frameattr_equiv(
getattr(self, fattrnm), getattr(other, fattrnm)
):
return False
return True
else:
# not a BaseCoordinateFrame nor a SkyCoord object
raise TypeError(
"Tried to do is_equivalent_frame on something that isn't frame-like"
)
# High-level convenience methods
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
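Examples
--------
A minimal illustration::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.separation(c2).degree  # doctest: +FLOAT_CMP
1.0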
"""
from . import Angle
from .angle_utilities import angular_separation
if not self.is_equivalent_frame(other):
try:
kwargs = (
{"merge_attributes": False} if isinstance(other, SkyCoord) else {}
)
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError(
"Can only get separation to another SkyCoord "
"or a coordinate frame with data"
)
lon1 = self.spherical.lon
lat1 = self.spherical.lat
lon2 = other.spherical.lon
lat2 = other.spherical.lat
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(lon1, lat1, lon2, lat2)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes the three-dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
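Examples
--------
A minimal illustration (both coordinates need a ``distance``)::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> c1 = SkyCoord(0*u.deg, 0*u.deg, distance=10*u.pc)
>>> c2 = SkyCoord(0*u.deg, 0*u.deg, distance=11*u.pc)
>>> c1.separation_3d(c2).to_value(u.pc)  # doctest: +FLOAT_CMP
1.0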
"""
if not self.is_equivalent_frame(other):
try:
kwargs = (
{"merge_attributes": False} if isinstance(other, SkyCoord) else {}
)
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError(
"Can only get separation to another SkyCoord "
"or a coordinate frame with data"
)
if issubclass(self.data.__class__, UnitSphericalRepresentation):
raise ValueError(
"This object does not have a distance; cannot compute 3d separation."
)
if issubclass(other.data.__class__, UnitSphericalRepresentation):
raise ValueError(
"The other object does not have a distance; "
"cannot compute 3d separation."
)
c1 = self.cartesian.without_differentials()
c2 = other.cartesian.without_differentials()
return Distance((c1 - c2).norm())
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to find the offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the `separation`/`separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
separation :
for the *total* angular offset (not broken out into components).
position_angle :
for the direction of the offset.
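Examples
--------
A minimal illustration; the two returned angles point from ``self``
toward ``tocoord``::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> c1 = SkyCoord(10*u.deg, 20*u.deg)
>>> c2 = SkyCoord(10.1*u.deg, 20.1*u.deg)
>>> dlon, dlat = c1.spherical_offsets_to(c2)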
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError(
"Tried to use spherical_offsets_to with two non-matching frames!"
)
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
def spherical_offsets_by(self, d_lon, d_lat):
"""
Computes the coordinate that is a specified pair of angular offsets away
from this coordinate.
Parameters
----------
d_lon : angle-like
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
d_lat : angle-like
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Returns
-------
newcoord : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
``d_lat`` in the latitude direction and ``d_lon`` in the longitude
direction.
Notes
-----
This internally uses `~astropy.coordinates.SkyOffsetFrame` to do the
transformation. For a more complete set of transform offsets, use
`~astropy.coordinates.SkyOffsetFrame` or `~astropy.wcs.WCS` manually.
This specific method can be reproduced by doing
``SkyCoord(SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self))``.
See Also
--------
spherical_offsets_to : compute the angular offsets to another coordinate
directional_offset_by : offset a coordinate by an angle in a direction
"""
from .builtin_frames.skyoffset import SkyOffsetFrame
return self.__class__(
SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self)
)
def directional_offset_by(self, position_angle, separation):
"""
Computes coordinates at the given offset from this coordinate.
Parameters
----------
position_angle : `~astropy.coordinates.Angle`
The position angle of the offset.
separation : `~astropy.coordinates.Angle`
The angular separation of the offset.
Returns
-------
newpoints : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
the given `position_angle` and `separation`.
Notes
-----
Returned SkyCoord frame retains only the frame attributes that are for
the resulting frame type. (e.g. if the input frame is
`~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but
an ``obstime`` will not.)
For a more complete set of transform offsets, use `~astropy.wcs.WCS`.
`~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to
create a spherical frame with (lat=0, lon=0) at a reference point,
approximating an xy cartesian system for small offsets. This method
is distinct in that it is accurate on the sphere.
See Also
--------
position_angle : inverse operation for the ``position_angle`` component
separation : inverse operation for the ``separation`` component
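Examples
--------
A minimal illustration; the offset coordinate is not printed here::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> c = SkyCoord(10*u.deg, 20*u.deg)
>>> c_new = c.directional_offset_by(position_angle=90*u.deg, separation=1*u.deg)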
"""
from . import angle_utilities
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
newlon, newlat = angle_utilities.offset_by(
lon=slon, lat=slat, posang=position_angle, distance=separation
)
return SkyCoord(newlon, newlat, frame=self.frame)
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest on-sky matches of this coordinate in a set of
catalog coordinates.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is ``2``,
for matching a coordinate catalog against *itself* (``1``
is inappropriate because each point will find itself as the
closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object. Unless both this and ``catalogcoord`` have associated
distances, this quantity assumes that all sources are at a
distance of 1 (dimensionless).
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_sky
SkyCoord.match_to_catalog_3d
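Examples
--------
A minimal sketch of the typical matching pattern (requires SciPy, so the
calls are not executed here)::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> targets = SkyCoord([10.1, 30.2]*u.deg, [20.1, 40.2]*u.deg)
>>> catalog = SkyCoord([10, 20, 30]*u.deg, [20, 30, 40]*u.deg)
>>> idx, sep2d, dist3d = targets.match_to_catalog_sky(catalog)  # doctest: +SKIP
>>> matches = catalog[idx]  # doctest: +SKIP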
"""
from .matching import match_coordinates_sky
if not (
isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data
):
raise TypeError(
"Can only get separation to another SkyCoord or a "
"coordinate frame with data"
)
res = match_coordinates_sky(
self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_sky"
)
return res
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest 3-dimensional matches of this coordinate to a set
of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in this object or the
``catalogcoord`` object.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is
``2``, for matching a coordinate catalog against *itself*
(``1`` is inappropriate because each point will find
itself as the closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_3d
SkyCoord.match_to_catalog_sky
"""
from .matching import match_coordinates_3d
if not (
isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data
):
raise TypeError(
"Can only get separation to another SkyCoord or a "
"coordinate frame with data"
)
res = match_coordinates_3d(
self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_3d"
)
return res
def search_around_sky(self, searcharoundcoords, seplimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given on-sky separation.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : coordinate-like
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
seplimit : `~astropy.units.Quantity` ['angle']
The on-sky separation to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_sky
SkyCoord.search_around_3d
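Examples
--------
A minimal sketch (requires SciPy, so the call is not executed here)::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> coords = SkyCoord([10.0, 10.01, 30.0]*u.deg, [20.0, 20.01, 40.0]*u.deg)
>>> around = SkyCoord([10.005]*u.deg, [20.005]*u.deg)
>>> idxaround, idxself, sep2d, dist3d = coords.search_around_sky(
...     around, 1*u.arcmin)  # doctest: +SKIP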
"""
from .matching import search_around_sky
return search_around_sky(
searcharoundcoords, self, seplimit, storekdtree="_kdtree_sky"
)
def search_around_3d(self, searcharoundcoords, distlimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given 3D radius.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation_3d`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
distlimit : `~astropy.units.Quantity` ['length']
The physical radius to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_3d
SkyCoord.search_around_sky
"""
from .matching import search_around_3d
return search_around_3d(
searcharoundcoords, self, distlimit, storekdtree="_kdtree_3d"
)
def position_angle(self, other):
"""
Computes the on-sky position angle (East of North) between this
`SkyCoord` and another.
Parameters
----------
other : `SkyCoord`
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``. If either ``self`` or ``other`` contain arrays, this
will be an array following the appropriate `numpy` broadcasting
rules.
Examples
--------
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).degree
90.0
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
from . import angle_utilities
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError(
"Can only get position_angle to another "
"SkyCoord or a coordinate frame with data"
)
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
olat = other.represent_as(UnitSphericalRepresentation).lat
olon = other.represent_as(UnitSphericalRepresentation).lon
return angle_utilities.position_angle(slon, slat, olon, olat)
def skyoffset_frame(self, rotation=None):
"""
Returns the sky offset frame with this `SkyCoord` at the origin.
Parameters
----------
rotation : angle-like
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Returns
-------
astrframe : `~astropy.coordinates.SkyOffsetFrame`
A sky offset frame of the same type as this `SkyCoord` (e.g., if
this object has an ICRS coordinate, the resulting frame is
SkyOffsetICRS, with the origin set to this object)
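Examples
--------
A minimal illustration of expressing one coordinate as an offset from
another::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> center = SkyCoord(10*u.deg, 45*u.deg)
>>> target = SkyCoord(11*u.deg, 46*u.deg)
>>> offsets = target.transform_to(center.skyoffset_frame())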
"""
from .builtin_frames.skyoffset import SkyOffsetFrame
return SkyOffsetFrame(origin=self, rotation=rotation)
def get_constellation(self, short_name=False, constellation_list="iau"):
"""
Determines the constellation(s) of the coordinates this `SkyCoord`
contains.
Parameters
----------
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If this is a scalar coordinate, returns the name of the
constellation. If it is an array `SkyCoord`, it returns an array of
names.
Notes
-----
To determine which constellation a point on the sky is in, this first
precesses to B1875, and then uses the Delporte boundaries of the 88
modern constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
See Also
--------
astropy.coordinates.get_constellation
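Examples
--------
An indicative example (the coordinate below is roughly that of M31)::
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> SkyCoord(10.68*u.deg, 41.27*u.deg).get_constellation()  # doctest: +SKIP
'Andromeda'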
"""
from .funcs import get_constellation
# because of issue #7028, the conversion to a PrecessedGeocentric
# system fails in some cases. Work around is to drop the velocities.
# they are not needed here since only position information is used
extra_frameattrs = {nm: getattr(self, nm) for nm in self._extra_frameattr_names}
novel = SkyCoord(
self.realize_frame(self.data.without_differentials()), **extra_frameattrs
)
return get_constellation(novel, short_name, constellation_list)
# the simpler version below can be used when gh-issue #7028 is resolved
# return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode="all"):
"""
Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
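Examples
--------
A minimal sketch using a simple TAN projection constructed purely for
illustration; with ``origin=0`` the reference coordinate lands on pixel
``crpix - 1``::
>>> from astropy.coordinates import SkyCoord
>>> from astropy.wcs import WCS
>>> import astropy.units as u
>>> wcs = WCS(naxis=2)
>>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
>>> wcs.wcs.crval = [10, 20]
>>> wcs.wcs.crpix = [50, 50]
>>> wcs.wcs.cdelt = [-0.01, 0.01]
>>> x, y = SkyCoord(10*u.deg, 20*u.deg).to_pixel(wcs)  # doctest: +SKIP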
"""
from astropy.wcs.utils import skycoord_to_pixel
return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode="all"):
"""
Create a new `SkyCoord` from pixel coordinates using an
`~astropy.wcs.WCS` object.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``).
Returns
-------
coord : `~astropy.coordinates.SkyCoord`
A new object with sky coordinates corresponding to the input ``xp``
and ``yp``.
See Also
--------
to_pixel : to do the inverse operation
astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
"""
from astropy.wcs.utils import pixel_to_skycoord
return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)
def contained_by(self, wcs, image=None, **kwargs):
"""
Determines if the SkyCoord is contained in the given wcs footprint.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS defining the footprint against which to check this coordinate.
image : array
Optional. The image associated with the wcs object that the coordinate
is being checked against. If not given, the naxis keywords will be used
to determine if the coordinate falls within the wcs footprint.
**kwargs
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
if image is not None:
ymax, xmax = image.shape
else:
xmax, ymax = wcs._naxis
import warnings
with warnings.catch_warnings():
# Suppress warnings since they just mean we didn't find the coordinate
warnings.simplefilter("ignore")
try:
x, y = self.to_pixel(wcs, **kwargs)
except Exception:
return False
return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)
def radial_velocity_correction(
self, kind="barycentric", obstime=None, location=None
):
"""
Compute the correction required to convert a radial velocity at a given
time and place on the Earth's surface to a barycentric or heliocentric
velocity.
Parameters
----------
kind : str
The kind of velocity correction. Must be 'barycentric' or
'heliocentric'.
obstime : `~astropy.time.Time` or None, optional
The time at which to compute the correction. If `None`, the
``obstime`` frame attribute on the `SkyCoord` will be used.
location : `~astropy.coordinates.EarthLocation` or None, optional
The observer location at which to compute the correction. If
`None`, the ``location`` frame attribute on the passed-in
``obstime`` will be used, and if that is None, the ``location``
frame attribute on the `SkyCoord` will be used.
Raises
------
ValueError
If either ``obstime`` or ``location`` are passed in (not ``None``)
when the frame attribute is already set on this `SkyCoord`.
TypeError
If ``obstime`` or ``location`` aren't provided, either as arguments
or as frame attributes.
Returns
-------
vcorr : `~astropy.units.Quantity` ['speed']
The correction with a positive sign. I.e., *add* this
to an observed radial velocity to get the barycentric (or
heliocentric) velocity. If m/s precision or better is needed,
see the notes below.
Notes
-----
The barycentric correction is calculated to higher precision than the
heliocentric correction and includes additional physics (e.g., time dilation).
Use barycentric corrections if m/s precision is required.
The algorithm here is sufficient to perform corrections at the mm/s level, but
care is needed in application. The barycentric correction returned uses the optical
approximation v = z * c. Strictly speaking, the barycentric correction is
multiplicative and should be applied as::
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord, EarthLocation
>>> from astropy.constants import c
>>> t = Time(56370.5, format='mjd', scale='utc')
>>> loc = EarthLocation('149d33m00.5s','-30d18m46.385s',236.87*u.m)
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc) # doctest: +REMOTE_DATA
>>> rv = rv + vcorr + rv * vcorr / c # doctest: +SKIP
Also note that this method returns the correction velocity in the so-called
*optical convention*::
>>> vcorr = zb * c # doctest: +SKIP
where ``zb`` is the barycentric correction redshift as defined in section 3
of Wright & Eastman (2014). The application formula given above follows from their
        equation (11) under the assumption that the radial velocity ``rv`` has also been
        defined using the same optical convention. Note that this can be regarded as a
        matter of velocity definition and does not by itself imply any loss of accuracy,
        provided sufficient care has been taken during interpretation of the results. If
        you need the barycentric correction expressed as the full relativistic velocity
        (e.g., to provide it as input to other software that performs the application), the
following recipe can be used::
>>> zb = vcorr / c # doctest: +REMOTE_DATA
>>> zb_plus_one_squared = (zb + 1) ** 2 # doctest: +REMOTE_DATA
>>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1) # doctest: +REMOTE_DATA
or alternatively using just equivalencies::
>>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz)) # doctest: +REMOTE_DATA
See also `~astropy.units.equivalencies.doppler_optical`,
`~astropy.units.equivalencies.doppler_radio`, and
`~astropy.units.equivalencies.doppler_relativistic` for more information on
the velocity conventions.
        The default is for this method to use the builtin ephemeris for
        computing the Sun and Earth locations. Other ephemerides can be chosen
        by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
        either directly or via a ``with`` statement. For example, to use the JPL
ephemeris, do::
>>> from astropy.coordinates import solar_system_ephemeris
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> with solar_system_ephemeris.set('jpl'): # doctest: +REMOTE_DATA
... rv += sc.radial_velocity_correction(obstime=t, location=loc) # doctest: +SKIP
"""
# has to be here to prevent circular imports
from .solar_system import get_body_barycentric_posvel
# location validation
timeloc = getattr(obstime, "location", None)
if location is None:
if self.location is not None:
location = self.location
if timeloc is not None:
raise ValueError(
"`location` cannot be in both the passed-in `obstime` and this"
" `SkyCoord` because it is ambiguous which is meant for the"
" radial_velocity_correction."
)
elif timeloc is not None:
location = timeloc
else:
raise TypeError(
"Must provide a `location` to radial_velocity_correction, either as"
" a SkyCoord frame attribute, as an attribute on the passed in"
" `obstime`, or in the method call."
)
elif self.location is not None or timeloc is not None:
raise ValueError(
"Cannot compute radial velocity correction if `location` argument is"
" passed in and there is also a `location` attribute on this SkyCoord"
" or the passed-in `obstime`."
)
# obstime validation
coo_at_rv_obstime = self # assume we need no space motion for now
if obstime is None:
obstime = self.obstime
if obstime is None:
raise TypeError(
"Must provide an `obstime` to radial_velocity_correction, either as"
" a SkyCoord frame attribute or in the method call."
)
elif self.obstime is not None and self.frame.data.differentials:
# we do need space motion after all
coo_at_rv_obstime = self.apply_space_motion(obstime)
elif self.obstime is None:
# warn the user if the object has differentials set
if "s" in self.data.differentials:
warnings.warn(
"SkyCoord has space motion, and therefore the specified "
"position of the SkyCoord may not be the same as "
"the `obstime` for the radial velocity measurement. "
"This may affect the rv correction at the order of km/s"
"for very high proper motions sources. If you wish to "
"apply space motion of the SkyCoord to correct for this"
"the `obstime` attribute of the SkyCoord must be set",
AstropyUserWarning,
)
pos_earth, v_earth = get_body_barycentric_posvel("earth", obstime)
if kind == "barycentric":
v_origin_to_earth = v_earth
elif kind == "heliocentric":
v_sun = get_body_barycentric_posvel("sun", obstime)[1]
v_origin_to_earth = v_earth - v_sun
else:
raise ValueError(
"`kind` argument to radial_velocity_correction must "
f"be 'barycentric' or 'heliocentric', but got '{kind}'"
)
gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
        # Transforming to GCRS is not the correct thing to do here, since we don't want to
        # include aberration (or light deflection). Instead, only apply parallax if necessary.
icrs_cart = coo_at_rv_obstime.icrs.cartesian
icrs_cart_novel = icrs_cart.without_differentials()
if self.data.__class__ is UnitSphericalRepresentation:
targcart = icrs_cart_novel
else:
# skycoord has distances so apply parallax
obs_icrs_cart = pos_earth + gcrs_p
targcart = icrs_cart_novel - obs_icrs_cart
targcart /= targcart.norm()
if kind == "barycentric":
beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
gamma_obs = 1 / np.sqrt(1 - beta_obs.norm() ** 2)
gr = location.gravitational_redshift(obstime)
            # barycentric redshift according to eq 28 in Wright & Eastman (2014),
# neglecting Shapiro delay and effects of the star's own motion
zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr / speed_of_light)
# try and get terms corresponding to stellar motion.
if icrs_cart.differentials:
try:
ro = self.icrs.cartesian
beta_star = ro.differentials["s"].to_cartesian() / speed_of_light
# ICRS unit vector at coordinate epoch
ro = ro.without_differentials()
ro /= ro.norm()
zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))
except u.UnitConversionError:
warnings.warn(
"SkyCoord contains some velocity information, but not enough to"
" calculate the full space motion of the source, and so this"
" has been ignored for the purposes of calculating the radial"
" velocity correction. This can lead to errors on the order of"
" metres/second.",
AstropyUserWarning,
)
zb = zb - 1
return zb * speed_of_light
else:
# do a simpler correction ignoring time dilation and gravitational redshift
# this is adequate since Heliocentric corrections shouldn't be used if
# cm/s precision is required.
return targcart.dot(v_origin_to_earth + gcrs_v)
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
r"""
A convenience method to create and return a new `SkyCoord` from the data
in an astropy Table.
This method matches table columns that start with the case-insensitive
        names of the components of the requested frames (including
differentials), if they are also followed by a non-alphanumeric
character. It will also match columns that *end* with the component name
if a non-alphanumeric character is *before* it.
For example, the first rule means columns with names like
``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
`~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
are *not*. Similarly, the second rule applied to the
`~astropy.coordinates.Galactic` frame means that a column named
        ``'gal_l'`` will be used as the ``l`` component, but ``'gall'`` or
        ``'fill'`` will not.
The definition of alphanumeric here is based on Unicode's definition
of alphanumeric, except without ``_`` (which is normally considered
alphanumeric). So for ASCII, this means the non-alphanumeric characters
        are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
Parameters
----------
table : `~astropy.table.Table` or subclass
The table to load data from.
**coord_kwargs
Any additional keyword arguments are passed directly to this class's
constructor.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord` or subclass
The new `SkyCoord` (or subclass) object.
Raises
------
ValueError
If more than one match is found in the table for a component,
unless the additional matches are also valid frame component names.
If a "coord_kwargs" is provided for a value also found in the table.
"""
_frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
frame = _frame_cls(**_frame_kwargs)
coord_kwargs["frame"] = coord_kwargs.get("frame", frame)
representation_component_names = set(
frame.get_representation_component_names()
).union(set(frame.get_representation_component_names("s")))
comp_kwargs = {}
for comp_name in representation_component_names:
            # this matches things like 'ra[...]' but *not* 'rad'.
# note that the "_" must be in there explicitly, because
# "alphanumeric" usually includes underscores.
starts_with_comp = comp_name + r"(\W|\b|_)"
# this part matches stuff like 'center_ra', but *not*
# 'aura'
ends_with_comp = r".*(\W|\b|_)" + comp_name + r"\b"
# the final regex ORs together the two patterns
rex = re.compile(
rf"({starts_with_comp})|({ends_with_comp})", re.IGNORECASE | re.UNICODE
)
# find all matches
matches = {col_name for col_name in table.colnames if rex.match(col_name)}
# now need to select among matches, also making sure we don't have
# an exact match with another component
if len(matches) == 0: # no matches
continue
elif len(matches) == 1: # only one match
col_name = matches.pop()
else: # more than 1 match
# try to sieve out other components
matches -= representation_component_names - {comp_name}
# if there's only one remaining match, it worked.
if len(matches) == 1:
col_name = matches.pop()
else:
raise ValueError(
f'Found at least two matches for component "{comp_name}":'
f' "{matches}". Cannot guess coordinates from a table with this'
" ambiguity."
)
comp_kwargs[comp_name] = table[col_name]
for k, v in comp_kwargs.items():
if k in coord_kwargs:
raise ValueError(
f'Found column "{v.name}" in table, but it was already provided as'
' "{k}" keyword to guess_from_table function.'
)
else:
coord_kwargs[k] = v
return cls(**coord_kwargs)
# Name resolve
@classmethod
def from_name(cls, name, frame="icrs", parse=False, cache=True):
"""
Given a name, query the CDS name resolver to attempt to retrieve
coordinate information for that object. The search database, sesame
url, and query timeout can be set through configuration items in
``astropy.coordinates.name_resolve`` -- see docstring for
`~astropy.coordinates.get_icrs_coordinates` for more
information.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
frame : str or `BaseCoordinateFrame` class or instance
The frame to transform the object to.
parse : bool
Whether to attempt extracting the coordinates from the name by
            parsing with a regex. For object catalog names that have
J-coordinates embedded in their names, e.g.,
'CRTS SSS100805 J194428-420209', this may be much faster than a
Sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, optional
Determines whether to cache the results or not. To update or
overwrite an existing value, pass ``cache='update'``.
Returns
-------
coord : SkyCoord
Instance of the SkyCoord class.
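        Examples
        --------
        A short example; this requires a network connection to the Sesame
        service, so it is only run with remote data enabled.
        >>> from astropy.coordinates import SkyCoord
        >>> orion = SkyCoord.from_name("M42")  # doctest: +REMOTE_DATA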
"""
from .name_resolve import get_icrs_coordinates
icrs_coord = get_icrs_coordinates(name, parse, cache=cache)
icrs_sky_coord = cls(icrs_coord)
if frame in ("icrs", icrs_coord.__class__):
return icrs_sky_coord
else:
return icrs_sky_coord.transform_to(frame)
|
8e23ca29cae1e674268ccb534875360f861dc73400e0f77b0b5fcb9107870da2 | """
In this module, we define the coordinate representation classes, which are
used to represent low-level cartesian, spherical, cylindrical, and other
coordinates.
"""
import abc
import functools
import inspect
import operator
import warnings
import numpy as np
from erfa import ufunc as erfa_ufunc
import astropy.units as u
from astropy.utils import ShapedLikeNDArray, classproperty
from astropy.utils.data_info import MixinInfo
from astropy.utils.exceptions import DuplicateRepresentationWarning
from .angles import Angle, Latitude, Longitude
from .distances import Distance
from .matrix_utilities import is_O3
__all__ = [
"BaseRepresentationOrDifferential",
"BaseRepresentation",
"CartesianRepresentation",
"SphericalRepresentation",
"UnitSphericalRepresentation",
"RadialRepresentation",
"PhysicsSphericalRepresentation",
"CylindricalRepresentation",
"BaseDifferential",
"CartesianDifferential",
"BaseSphericalDifferential",
"BaseSphericalCosLatDifferential",
"SphericalDifferential",
"SphericalCosLatDifferential",
"UnitSphericalDifferential",
"UnitSphericalCosLatDifferential",
"RadialDifferential",
"CylindricalDifferential",
"PhysicsSphericalDifferential",
]
# Module-level dict mapping representation string alias names to classes.
# This is populated by __init_subclass__ when called by Representation or
# Differential classes so that they are all registered automatically.
REPRESENTATION_CLASSES = {}
DIFFERENTIAL_CLASSES = {}
# set for tracking duplicates
DUPLICATE_REPRESENTATIONS = set()
# a hash for the content of the above two dicts, cached for speed.
_REPRDIFF_HASH = None
def _fqn_class(cls):
"""Get the fully qualified name of a class"""
return cls.__module__ + "." + cls.__qualname__
def get_reprdiff_cls_hash():
"""
Returns a hash value that should be invariable if the
`REPRESENTATION_CLASSES` and `DIFFERENTIAL_CLASSES` dictionaries have not
changed.
"""
global _REPRDIFF_HASH
if _REPRDIFF_HASH is None:
_REPRDIFF_HASH = hash(tuple(REPRESENTATION_CLASSES.items())) + hash(
tuple(DIFFERENTIAL_CLASSES.items())
)
return _REPRDIFF_HASH
def _invalidate_reprdiff_cls_hash():
global _REPRDIFF_HASH
_REPRDIFF_HASH = None
def _array2string(values, prefix=""):
# Work around version differences for array2string.
kwargs = {"separator": ", ", "prefix": prefix}
kwargs["formatter"] = {}
return np.array2string(values, **kwargs)
class BaseRepresentationOrDifferentialInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {"unit"} # Indicates unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
# Create numpy dtype so that numpy formatting will work.
components = val.components
values = tuple(getattr(val, component).value for component in components)
a = np.empty(
getattr(val, "shape", ()),
[(component, value.dtype) for component, value in zip(components, values)],
)
for component, value in zip(components, values):
a[component] = value
return str(a)
@property
def _represent_as_dict_attrs(self):
return self._parent.components
@property
def unit(self):
if self._parent is None:
return None
unit = self._parent._unitstr
return unit[1:-1] if unit.startswith("(") else unit
def new_like(self, reps, length, metadata_conflicts="warn", name=None):
"""
Return a new instance like ``reps`` with ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
reps : list
List of input representations or differentials.
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `BaseRepresentation` or `BaseDifferential` subclass instance
            Empty instance of this class consistent with ``reps``.
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
reps, metadata_conflicts, name, ("meta", "description")
)
# Make a new representation or differential with the desired length
# using the _apply / __getitem__ machinery to effectively return
# rep0[[0, 0, ..., 0, 0]]. This will have the right shape, and
# include possible differentials.
indexes = np.zeros(length, dtype=np.int64)
out = reps[0][indexes]
# Use __setitem__ machinery to check whether all representations
# can represent themselves as this one without loss of information.
for rep in reps[1:]:
try:
out[0] = rep[0]
except Exception as err:
raise ValueError("input representations are inconsistent.") from err
# Set (merged) info attributes.
for attr in ("name", "meta", "description"):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
class BaseRepresentationOrDifferential(ShapedLikeNDArray):
"""3D coordinate representations and differentials.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D point or differential. The names are the
keys and the subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied; if `False`, they will be
broadcast together but not use new memory.
"""
# Ensure multiplication/division with ndarray or Quantity doesn't lead to
# object arrays.
__array_priority__ = 50000
info = BaseRepresentationOrDifferentialInfo()
def __init__(self, *args, **kwargs):
# make argument a list, so we can pop them off.
args = list(args)
components = self.components
if (
args
and isinstance(args[0], self.__class__)
and all(arg is None for arg in args[1:])
):
rep_or_diff = args[0]
copy = kwargs.pop("copy", True)
attrs = [getattr(rep_or_diff, component) for component in components]
if "info" in rep_or_diff.__dict__:
self.info = rep_or_diff.info
if kwargs:
raise TypeError(
"unexpected keyword arguments for case "
f"where class instance is passed in: {kwargs}"
)
else:
attrs = []
for component in components:
try:
attr = args.pop(0) if args else kwargs.pop(component)
except KeyError:
raise TypeError(
"__init__() missing 1 required positional "
f"argument: {component!r}"
) from None
if attr is None:
raise TypeError(
"__init__() missing 1 required positional argument:"
f" {component!r} (or first argument should be an instance of"
f" {self.__class__.__name__})."
)
attrs.append(attr)
copy = args.pop(0) if args else kwargs.pop("copy", True)
if args:
raise TypeError(f"unexpected arguments: {args}")
if kwargs:
for component in components:
if component in kwargs:
raise TypeError(
f"__init__() got multiple values for argument {component!r}"
)
raise TypeError(f"unexpected keyword arguments: {kwargs}")
# Pass attributes through the required initializing classes.
attrs = [
self.attr_classes[component](attr, copy=copy, subok=True)
for component, attr in zip(components, attrs)
]
try:
bc_attrs = np.broadcast_arrays(*attrs, subok=True)
except ValueError as err:
if len(components) <= 2:
c_str = " and ".join(components)
else:
c_str = ", ".join(components[:2]) + ", and " + components[2]
raise ValueError(f"Input parameters {c_str} cannot be broadcast") from err
# The output of np.broadcast_arrays() has limitations on writeability, so we perform
# additional handling to enable writeability in most situations. This is primarily
# relevant for allowing the changing of the wrap angle of longitude components.
#
# If the shape has changed for a given component, broadcasting is needed:
# If copy=True, we make a copy of the broadcasted array to ensure writeability.
# Note that array had already been copied prior to the broadcasting.
# TODO: Find a way to avoid the double copy.
# If copy=False, we use the broadcasted array, and writeability may still be
# limited.
# If the shape has not changed for a given component, we can proceed with using the
# non-broadcasted array, which avoids writeability issues from np.broadcast_arrays().
attrs = [
(bc_attr.copy() if copy else bc_attr)
if bc_attr.shape != attr.shape
else attr
for attr, bc_attr in zip(attrs, bc_attrs)
]
# Set private attributes for the attributes. (If not defined explicitly
# on the class, the metaclass will define properties to access these.)
for component, attr in zip(components, attrs):
setattr(self, "_" + component, attr)
@classmethod
def get_name(cls):
"""Name of the representation or differential.
In lower case, with any trailing 'representation' or 'differential'
removed. (E.g., 'spherical' for
`~astropy.coordinates.SphericalRepresentation` or
`~astropy.coordinates.SphericalDifferential`.)
"""
name = cls.__name__.lower()
if name.endswith("representation"):
name = name[:-14]
elif name.endswith("differential"):
name = name[:-12]
return name
# The two methods that any subclass has to define.
@classmethod
@abc.abstractmethod
def from_cartesian(cls, other):
"""Create a representation of this class from a supplied Cartesian one.
Parameters
----------
other : `CartesianRepresentation`
The representation to turn into this class
Returns
-------
representation : `BaseRepresentation` subclass instance
A new representation of this class's type.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@abc.abstractmethod
def to_cartesian(self):
"""Convert the representation to its Cartesian form.
Note that any differentials get dropped.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. For example, transforming
an angular position defined at distance=0 through cartesian coordinates
and back will lose the original angular coordinates::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> rep = coord.SphericalRepresentation(
... lon=15*u.deg,
... lat=-11*u.deg,
... distance=0*u.pc)
>>> rep.to_cartesian().represent_as(coord.SphericalRepresentation)
<SphericalRepresentation (lon, lat, distance) in (rad, rad, pc)
(0., 0., 0.)>
Returns
-------
cartrepr : `CartesianRepresentation`
The representation in Cartesian form.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@property
def components(self):
"""A tuple with the in-order names of the coordinate components."""
return tuple(self.attr_classes)
def __eq__(self, value):
"""Equality operator
This implements strict equality and requires that the representation
classes are identical and that the representation data are exactly equal.
"""
if self.__class__ is not value.__class__:
raise TypeError(
"cannot compare: objects must have same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
try:
np.broadcast(self, value)
except ValueError as exc:
raise ValueError(f"cannot compare: {exc}") from exc
out = True
for comp in self.components:
out &= getattr(self, "_" + comp) == getattr(value, "_" + comp)
return out
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation or differential with ``method`` applied
to the component data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for
`~astropy.coordinates.CartesianRepresentation`), with the results used
to create a new instance.
Internally, it is also used to apply functions to the components
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
new = super().__new__(self.__class__)
for component in self.components:
setattr(new, "_" + component, apply_method(getattr(self, component)))
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
if value.__class__ is not self.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
for component in self.components:
getattr(self, "_" + component)[item] = getattr(value, "_" + component)
@property
def shape(self):
"""The shape of the instance and underlying arrays.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of any of the components cannot be changed without the
arrays being copied. For these cases, use the ``reshape`` method
(which copies any arrays that cannot be reshaped in-place).
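        Examples
        --------
        An illustrative sketch (array values chosen arbitrarily).
        >>> import numpy as np
        >>> import astropy.units as u
        >>> from astropy.coordinates import CartesianRepresentation
        >>> rep = CartesianRepresentation(np.arange(6.).reshape(2, 3) * u.m,
        ...                               np.zeros((2, 3)) * u.m,
        ...                               np.zeros((2, 3)) * u.m)
        >>> rep.shape
        (2, 3)
        >>> rep.shape = (6,)
        >>> rep.shape
        (6,)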
"""
return getattr(self, self.components[0]).shape
@shape.setter
def shape(self, shape):
# We keep track of arrays that were already reshaped since we may have
# to return those to their original shape if a later shape-setting
# fails. (This can happen since coordinates are broadcast together.)
reshaped = []
oldshape = self.shape
for component in self.components:
val = getattr(self, component)
if val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
# Required to support multiplication and division, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _scale_operation(self, op, *args):
raise NotImplementedError()
def __mul__(self, other):
return self._scale_operation(operator.mul, other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._scale_operation(operator.truediv, other)
def __neg__(self):
return self._scale_operation(operator.neg)
# Follow numpy convention and make an independent copy.
def __pos__(self):
return self.copy()
# Required to support addition and subtraction, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _combine_operation(self, op, other, reverse=False):
raise NotImplementedError()
def __add__(self, other):
return self._combine_operation(operator.add, other)
def __radd__(self, other):
return self._combine_operation(operator.add, other, reverse=True)
def __sub__(self, other):
return self._combine_operation(operator.sub, other)
def __rsub__(self, other):
return self._combine_operation(operator.sub, other, reverse=True)
# The following are used for repr and str
@property
def _values(self):
"""Turn the coordinates into a record array with the coordinate values.
The record array fields will have the component names.
"""
coo_items = [(c, getattr(self, c)) for c in self.components]
result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items])
for c, coo in coo_items:
result[c] = coo.value
return result
@property
def _units(self):
"""Return a dictionary with the units of the coordinate components."""
return {cmpnt: getattr(self, cmpnt).unit for cmpnt in self.components}
@property
def _unitstr(self):
units_set = set(self._units.values())
if len(units_set) == 1:
unitstr = units_set.pop().to_string()
else:
unitstr = "({})".format(
", ".join(
self._units[component].to_string() for component in self.components
)
)
return unitstr
def __str__(self):
return f"{_array2string(self._values)} {self._unitstr:s}"
def __repr__(self):
prefixstr = " "
arrstr = _array2string(self._values, prefix=prefixstr)
diffstr = ""
if getattr(self, "differentials", None):
diffstr = "\n (has differentials w.r.t.: {})".format(
", ".join([repr(key) for key in self.differentials.keys()])
)
unitstr = ("in " + self._unitstr) if self._unitstr else "[dimensionless]"
return (
f"<{self.__class__.__name__} ({', '.join(self.components)})"
f" {unitstr:s}\n{prefixstr}{arrstr}{diffstr}>"
)
def _make_getter(component):
"""Make an attribute getter for use in a property.
Parameters
----------
component : str
The name of the component that should be accessed. This assumes the
actual value is stored in an attribute of that name prefixed by '_'.
"""
# This has to be done in a function to ensure the reference to component
# is not lost/redirected.
component = "_" + component
def get_component(self):
return getattr(self, component)
return get_component
class RepresentationInfo(BaseRepresentationOrDifferentialInfo):
@property
def _represent_as_dict_attrs(self):
attrs = super()._represent_as_dict_attrs
if self._parent._differentials:
attrs += ("differentials",)
return attrs
def _represent_as_dict(self, attrs=None):
out = super()._represent_as_dict(attrs)
for key, value in out.pop("differentials", {}).items():
out[f"differentials.{key}"] = value
return out
def _construct_from_dict(self, map):
differentials = {}
for key in list(map.keys()):
if key.startswith("differentials."):
differentials[key[14:]] = map.pop(key)
map["differentials"] = differentials
return super()._construct_from_dict(map)
class BaseRepresentation(BaseRepresentationOrDifferential):
"""Base for representing a point in a 3D coordinate system.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D points. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
subclass instance, or a dictionary with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All representation classes should subclass this base representation class,
and define an ``attr_classes`` attribute, a `dict`
which maps component names to the class that creates them. They must also
define a ``to_cartesian`` method and a ``from_cartesian`` class method. By
default, transformations are done via the cartesian system, but classes
that want to define a smarter transformation path can overload the
``represent_as`` method. If one wants to use an associated differential
class, one should also define ``unit_vectors`` and ``scale_factors``
methods (see those methods for details).
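    Examples
    --------
    A deliberately minimal sketch of a custom subclass; the class below is
    hypothetical, reuses the Cartesian components only to keep the example
    short, and omits differential support and error handling.
    >>> import astropy.units as u
    >>> from astropy.coordinates import (BaseRepresentation,
    ...                                  CartesianRepresentation)
    >>> class ToyRepresentation(BaseRepresentation):
    ...     attr_classes = {'x': u.Quantity, 'y': u.Quantity, 'z': u.Quantity}
    ...     @classmethod
    ...     def from_cartesian(cls, cart):
    ...         return cls(cart.x, cart.y, cart.z, copy=False)
    ...     def to_cartesian(self):
    ...         return CartesianRepresentation(self.x, self.y, self.z, copy=False)
    >>> ToyRepresentation.get_name()
    'toy'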
"""
info = RepresentationInfo()
def __init_subclass__(cls, **kwargs):
# Register representation name (except for BaseRepresentation)
if cls.__name__ == "BaseRepresentation":
return
if not hasattr(cls, "attr_classes"):
raise NotImplementedError(
'Representations must have an "attr_classes" class attribute.'
)
repr_name = cls.get_name()
# first time a duplicate is added
# remove first entry and add both using their qualnames
if repr_name in REPRESENTATION_CLASSES:
DUPLICATE_REPRESENTATIONS.add(repr_name)
fqn_cls = _fqn_class(cls)
existing = REPRESENTATION_CLASSES[repr_name]
fqn_existing = _fqn_class(existing)
if fqn_cls == fqn_existing:
raise ValueError(f'Representation "{fqn_cls}" already defined')
msg = (
f'Representation "{repr_name}" already defined, removing it to avoid'
                f' confusion. Use qualnames "{fqn_cls}" and "{fqn_existing}" or class'
                " instances directly."
)
warnings.warn(msg, DuplicateRepresentationWarning)
del REPRESENTATION_CLASSES[repr_name]
REPRESENTATION_CLASSES[fqn_existing] = existing
repr_name = fqn_cls
# further definitions with the same name, just add qualname
elif repr_name in DUPLICATE_REPRESENTATIONS:
fqn_cls = _fqn_class(cls)
warnings.warn(
f'Representation "{repr_name}" already defined, using qualname '
f'"{fqn_cls}".'
)
repr_name = fqn_cls
if repr_name in REPRESENTATION_CLASSES:
raise ValueError(f'Representation "{repr_name}" already defined')
REPRESENTATION_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# define getters for any component that does not yet have one.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(
cls,
component,
property(
_make_getter(component),
doc=f"The '{component}' component of the points(s).",
),
)
super().__init_subclass__(**kwargs)
def __init__(self, *args, differentials=None, **kwargs):
# Handle any differentials passed in.
super().__init__(*args, **kwargs)
if differentials is None and args and isinstance(args[0], self.__class__):
differentials = args[0]._differentials
self._differentials = self._validate_differentials(differentials)
def _validate_differentials(self, differentials):
"""
Validate that the provided differentials are appropriate for this
representation and recast/reshape as necessary and then return.
Note that this does *not* set the differentials on
``self._differentials``, but rather leaves that for the caller.
"""
# Now handle the actual validation of any specified differential classes
if differentials is None:
differentials = dict()
elif isinstance(differentials, BaseDifferential):
# We can't handle auto-determining the key for this combo
if isinstance(differentials, RadialDifferential) and isinstance(
self, UnitSphericalRepresentation
):
raise ValueError(
"To attach a RadialDifferential to a UnitSphericalRepresentation,"
" you must supply a dictionary with an appropriate key."
)
key = differentials._get_deriv_key(self)
differentials = {key: differentials}
for key in differentials:
try:
diff = differentials[key]
except TypeError as err:
raise TypeError(
"'differentials' argument must be a dictionary-like object"
) from err
diff._check_base(self)
if isinstance(diff, RadialDifferential) and isinstance(
self, UnitSphericalRepresentation
):
# We trust the passing of a key for a RadialDifferential
# attached to a UnitSphericalRepresentation because it will not
# have a paired component name (UnitSphericalRepresentation has
# no .distance) to automatically determine the expected key
pass
else:
expected_key = diff._get_deriv_key(self)
if key != expected_key:
raise ValueError(
f"For differential object '{repr(diff)}', expected "
f"unit key = '{expected_key}' but received key = '{key}'"
)
# For now, we are very rigid: differentials must have the same shape
# as the representation. This makes it easier to handle __getitem__
# and any other shape-changing operations on representations that
# have associated differentials
if diff.shape != self.shape:
# TODO: message of IncompatibleShapeError is not customizable,
                # so use a ValueError instead.
raise ValueError(
"Shape of differentials must be the same "
f"as the shape of the representation ({diff.shape} vs {self.shape})"
)
return differentials
def _raise_if_has_differentials(self, op_name):
"""
Used to raise a consistent exception for any operation that is not
supported when a representation has differentials attached.
"""
if self.differentials:
raise TypeError(
f"Operation '{op_name}' is not supported when "
f"differentials are attached to a {self.__class__.__name__}."
)
@classproperty
def _compatible_differentials(cls):
return [DIFFERENTIAL_CLASSES[cls.get_name()]]
@property
def differentials(self):
"""A dictionary of differential class instances.
The keys of this dictionary must be a string representation of the SI
unit with which the differential (derivative) is taken. For example, for
a velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
"""
return self._differentials
# We do not make unit_vectors and scale_factors abstract methods, since
# they are only necessary if one also defines an associated Differential.
# Also, doing so would break pre-differential representation subclasses.
def unit_vectors(self):
r"""Cartesian unit vectors in the direction of each component.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
The keys are the component names.
"""
raise NotImplementedError(f"{type(self)} has not implemented unit vectors")
def scale_factors(self):
r"""Scale factors for each component's direction.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
scale_factors : dict of `~astropy.units.Quantity`
The keys are the component names.
"""
raise NotImplementedError(f"{type(self)} has not implemented scale factors.")
def _re_represent_differentials(self, new_rep, differential_class):
"""Re-represent the differentials to the specified classes.
This returns a new dictionary with the same keys but with the
attached differentials converted to the new differential classes.
"""
if differential_class is None:
return dict()
if not self.differentials and differential_class:
raise ValueError("No differentials associated with this representation!")
elif (
len(self.differentials) == 1
and inspect.isclass(differential_class)
and issubclass(differential_class, BaseDifferential)
):
# TODO: is there a better way to do this?
differential_class = {
list(self.differentials.keys())[0]: differential_class
}
elif differential_class.keys() != self.differentials.keys():
raise ValueError(
"Desired differential classes must be passed in as a dictionary with"
" keys equal to a string representation of the unit of the derivative"
" for each differential stored with this "
f"representation object ({self.differentials})"
)
new_diffs = dict()
for k in self.differentials:
diff = self.differentials[k]
try:
new_diffs[k] = diff.represent_as(differential_class[k], base=self)
except Exception as err:
if differential_class[k] not in new_rep._compatible_differentials:
raise TypeError(
f"Desired differential class {differential_class[k]} is not "
"compatible with the desired "
f"representation class {new_rep.__class__}"
) from err
else:
raise
return new_diffs
def represent_as(self, other_class, differential_class=None):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via Cartesian coordinates.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. See the docstring for
:meth:`~astropy.coordinates.BaseRepresentationOrDifferential.to_cartesian`
for an example.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional
Classes in which the differentials should be represented.
Can be a single class if only a single differential is attached,
otherwise it should be a `dict` keyed by the same keys as the
differentials.
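        Examples
        --------
        A short sketch (values chosen arbitrarily).
        >>> import astropy.units as u
        >>> from astropy.coordinates import (CartesianRepresentation,
        ...                                  SphericalRepresentation)
        >>> rep = CartesianRepresentation(0*u.pc, 0*u.pc, 1*u.pc)
        >>> rep.represent_as(SphericalRepresentation)  # doctest: +FLOAT_CMP
        <SphericalRepresentation (lon, lat, distance) in (rad, rad, pc)
            (0., 1.57079633, 1.)>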
"""
if other_class is self.__class__ and not differential_class:
return self.without_differentials()
else:
if isinstance(other_class, str):
raise ValueError(
"Input to a representation's represent_as must be a class, not "
"a string. For strings, use frame objects."
)
if other_class is not self.__class__:
# The default is to convert via cartesian coordinates
new_rep = other_class.from_cartesian(self.to_cartesian())
else:
new_rep = self
new_rep._differentials = self._re_represent_differentials(
new_rep, differential_class
)
return new_rep
def transform(self, matrix):
"""Transform coordinates using a 3x3 matrix in a Cartesian basis.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
"""
# route transformation through Cartesian
difs_cls = {k: CartesianDifferential for k in self.differentials.keys()}
crep = self.represent_as(
CartesianRepresentation, differential_class=difs_cls
).transform(matrix)
# move back to original representation
difs_cls = {k: diff.__class__ for k, diff in self.differentials.items()}
rep = crep.represent_as(self.__class__, difs_cls)
return rep
def with_differentials(self, differentials):
"""
Create a new representation with the same positions as this
representation, but with these new differentials.
Differential keys that already exist in this object's differential dict
are overwritten.
Parameters
----------
differentials : sequence of `~astropy.coordinates.BaseDifferential` subclass instance
The differentials for the new representation to have.
Returns
-------
`~astropy.coordinates.BaseRepresentation` subclass instance
A copy of this representation, but with the ``differentials`` as
its differentials.
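        Examples
        --------
        A small sketch (values are illustrative only).
        >>> import astropy.units as u
        >>> from astropy.coordinates import (CartesianRepresentation,
        ...                                  CartesianDifferential)
        >>> pos = CartesianRepresentation(1*u.kpc, 2*u.kpc, 3*u.kpc)
        >>> vel = CartesianDifferential(10*u.km/u.s, 20*u.km/u.s, 30*u.km/u.s)
        >>> pos_with_vel = pos.with_differentials(vel)
        >>> list(pos_with_vel.differentials)
        ['s']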
"""
if not differentials:
return self
args = [getattr(self, component) for component in self.components]
# We shallow copy the differentials dictionary so we don't update the
# current object's dictionary when adding new keys
new_rep = self.__class__(
*args, differentials=self.differentials.copy(), copy=False
)
new_rep._differentials.update(new_rep._validate_differentials(differentials))
return new_rep
def without_differentials(self):
"""Return a copy of the representation without attached differentials.
Returns
-------
`~astropy.coordinates.BaseRepresentation` subclass instance
A shallow copy of this representation, without any differentials.
If no differentials were present, no copy is made.
"""
if not self._differentials:
return self
args = [getattr(self, component) for component in self.components]
return self.__class__(*args, copy=False)
@classmethod
def from_representation(cls, representation):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
The presentation that should be converted to this class.
"""
return representation.represent_as(cls)
def __eq__(self, value):
"""Equality operator for BaseRepresentation
This implements strict equality and requires that the representation
classes are identical, the differentials are identical, and that the
representation data are exactly equal.
"""
        # BaseRepresentationOrDifferential (checks classes and compares components)
out = super().__eq__(value)
# super() checks that the class is identical so can this even happen?
# (same class, different differentials ?)
if self._differentials.keys() != value._differentials.keys():
raise ValueError("cannot compare: objects must have same differentials")
for self_diff, value_diff in zip(
self._differentials.values(), value._differentials.values()
):
out &= self_diff == value_diff
return out
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation with ``method`` applied to the component
data.
This is not a simple inherit from ``BaseRepresentationOrDifferential``
because we need to call ``._apply()`` on any associated differential
classes.
See docstring for `BaseRepresentationOrDifferential._apply`.
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
rep = super()._apply(method, *args, **kwargs)
rep._differentials = {
k: diff._apply(method, *args, **kwargs)
for k, diff in self._differentials.items()
}
return rep
def __setitem__(self, item, value):
if not isinstance(value, BaseRepresentation):
raise TypeError(
f"value must be a representation instance, not {type(value)}."
)
if not (
isinstance(value, self.__class__)
or len(value.attr_classes) == len(self.attr_classes)
):
raise ValueError(
f"value must be representable as {self.__class__.__name__} "
"without loss of information."
)
diff_classes = {}
if self._differentials:
if self._differentials.keys() != value._differentials.keys():
raise ValueError("value must have the same differentials.")
for key, self_diff in self._differentials.items():
diff_classes[key] = self_diff_cls = self_diff.__class__
value_diff_cls = value._differentials[key].__class__
if not (
                    issubclass(value_diff_cls, self_diff_cls)
or (
len(value_diff_cls.attr_classes)
== len(self_diff_cls.attr_classes)
)
):
raise ValueError(
f"value differential {key!r} must be representable as "
f"{self_diff.__class__.__name__} without loss of information."
)
value = value.represent_as(self.__class__, diff_classes)
super().__setitem__(item, value)
for key, differential in self._differentials.items():
differential[item] = value._differentials[key]
def _scale_operation(self, op, *args):
"""Scale all non-angular components, leaving angular ones unchanged.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
results = []
for component, cls in self.attr_classes.items():
value = getattr(self, component)
if issubclass(cls, Angle):
results.append(value)
else:
results.append(op(value, *args))
# try/except catches anything that cannot initialize the class, such
# as operations that returned NotImplemented or a representation
# instead of a quantity (as would happen for, e.g., rep * rep).
try:
result = self.__class__(*results)
except Exception:
return NotImplemented
for key, differential in self.differentials.items():
diff_result = differential._scale_operation(op, *args, scaled_base=True)
result.differentials[key] = diff_result
return result
def _combine_operation(self, op, other, reverse=False):
"""Combine two representation.
By default, operate on the cartesian representations of both.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self.from_cartesian(result)
# We need to override this setter to support differentials
@BaseRepresentationOrDifferential.shape.setter
def shape(self, shape):
orig_shape = self.shape
# See: https://stackoverflow.com/questions/3336767/ for an example
BaseRepresentationOrDifferential.shape.fset(self, shape)
# also try to perform shape-setting on any associated differentials
try:
for k in self.differentials:
self.differentials[k].shape = shape
except Exception:
BaseRepresentationOrDifferential.shape.fset(self, orig_shape)
for k in self.differentials:
self.differentials[k].shape = orig_shape
raise
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.sqrt(
sum(
getattr(self, component) ** 2
for component, cls in self.attr_classes.items()
if not issubclass(cls, Angle)
)
)
def mean(self, *args, **kwargs):
"""Vector mean.
Averaging is done by converting the representation to cartesian, and
taking the mean of the x, y, and z components. The result is converted
back to the same representation as the input.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
mean : `~astropy.coordinates.BaseRepresentation` subclass instance
Vector mean, in the same representation as that of the input.
"""
self._raise_if_has_differentials("mean")
return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
Adding is done by converting the representation to cartesian, and
summing the x, y, and z components. The result is converted back to the
same representation as the input.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
sum : `~astropy.coordinates.BaseRepresentation` subclass instance
Vector sum, in the same representation as that of the input.
"""
self._raise_if_has_differentials("sum")
return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs))
def dot(self, other):
"""Dot product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation`
The representation to take the dot product with.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of the
cartesian representations of ``self`` and ``other``.
"""
return self.to_cartesian().dot(other)
def cross(self, other):
"""Vector cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to the type of representation of ``self``.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.BaseRepresentation` subclass instance
With vectors perpendicular to both ``self`` and ``other``, in the
same type of representation as ``self``.
"""
self._raise_if_has_differentials("cross")
return self.from_cartesian(self.to_cartesian().cross(other))
class CartesianRepresentation(BaseRepresentation):
"""
Representation of points in 3D cartesian coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z``
        have different shapes, they should be broadcastable. If not quantities,
``unit`` should be set. If only ``x`` is given, it is assumed that it
contains an array with the 3 coordinates stored along ``xyz_axis``.
unit : unit-like
If given, the coordinates will be converted to this unit (or taken to
        be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).
differentials : dict, `CartesianDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`CartesianDifferential` instance, or a dictionary of
`CartesianDifferential` s with keys set to a string representation of
the SI unit with which the differential (derivative) is taken. For
example, for a velocity differential on a positional representation, the
key would be ``'s'`` for seconds, indicating that the derivative is a
time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
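    Examples
    --------
    A brief sketch of the single-array form described above (values are
    illustrative).
    >>> import numpy as np
    >>> import astropy.units as u
    >>> from astropy.coordinates import CartesianRepresentation
    >>> xyz = np.array([[1., 2.], [3., 4.], [5., 6.]])  # 3 coordinates along axis 0
    >>> rep = CartesianRepresentation(xyz, unit=u.kpc)
    >>> rep.x  # doctest: +FLOAT_CMP
    <Quantity [1., 2.] kpc>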
"""
attr_classes = {"x": u.Quantity, "y": u.Quantity, "z": u.Quantity}
_xyz = None
def __init__(
self, x, y=None, z=None, unit=None, xyz_axis=None, differentials=None, copy=True
):
if y is None and z is None:
if isinstance(x, np.ndarray) and x.dtype.kind not in "OV":
# Short-cut for 3-D array input.
x = u.Quantity(x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._xyz = x
if xyz_axis:
x = np.moveaxis(x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._x, self._y, self._z = x
self._differentials = self._validate_differentials(differentials)
return
elif (
isinstance(x, CartesianRepresentation)
and unit is None
and xyz_axis is None
):
if differentials is None:
differentials = x._differentials
return super().__init__(x, differentials=differentials, copy=copy)
else:
x, y, z = x
if xyz_axis is not None:
raise ValueError(
"xyz_axis should only be set if x, y, and z are in a single array"
" passed in through x, i.e., y and z should not be not given."
)
if y is None or z is None:
raise ValueError(
f"x, y, and z are required to instantiate {self.__class__.__name__}"
)
if unit is not None:
x = u.Quantity(x, unit, copy=copy, subok=True)
y = u.Quantity(y, unit, copy=copy, subok=True)
z = u.Quantity(z, unit, copy=copy, subok=True)
copy = False
super().__init__(x, y, z, copy=copy, differentials=differentials)
if not (
self._x.unit.is_equivalent(self._y.unit)
and self._x.unit.is_equivalent(self._z.unit)
):
raise u.UnitsError("x, y, and z should have matching physical types")
def unit_vectors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
o = np.broadcast_to(0.0 * u.one, self.shape, subok=True)
return {
"x": CartesianRepresentation(l, o, o, copy=False),
"y": CartesianRepresentation(o, l, o, copy=False),
"z": CartesianRepresentation(o, o, l, copy=False),
}
def scale_factors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"x": l, "y": l, "z": l}
def get_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._xyz is not None:
if self._xyz_axis == xyz_axis:
return self._xyz
else:
return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._x, self._y, self._z], axis=xyz_axis)
xyz = property(get_xyz)
@classmethod
def from_cartesian(cls, other):
return other
def to_cartesian(self):
return self
def transform(self, matrix):
"""
Transform the cartesian coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : ndarray
A 3x3 transformation matrix, such as a rotation matrix.
Examples
--------
We can start off by creating a cartesian representation object:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc,
... [2, 3] * u.pc,
... [3, 4] * u.pc)
We now create a rotation matrix around the z axis:
>>> from astropy.coordinates.matrix_utilities import rotation_matrix
>>> rotation = rotation_matrix(30 * u.deg, axis='z')
Finally, we can apply this transformation:
>>> rep_new = rep.transform(rotation)
>>> rep_new.xyz # doctest: +FLOAT_CMP
<Quantity [[ 1.8660254 , 3.23205081],
[ 1.23205081, 1.59807621],
[ 3. , 4. ]] pc>
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_xyz(xyz_axis=-1))
# transformed representation
rep = self.__class__(p, xyz_axis=-1, copy=False)
# Handle differentials attached to this representation
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
try:
other_c = other.to_cartesian()
except Exception:
return NotImplemented
first, second = (self, other_c) if not reverse else (other_c, self)
return self.__class__(
*(
op(getattr(first, component), getattr(second, component))
for component in first.components
)
)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
# erfa pm: Modulus of p-vector.
return erfa_ufunc.pm(self.get_xyz(xyz_axis=-1))
def mean(self, *args, **kwargs):
"""Vector mean.
Returns a new CartesianRepresentation instance with the means of the
x, y, and z components.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("mean")
return self._apply("mean", *args, **kwargs)
def sum(self, *args, **kwargs):
"""Vector sum.
Returns a new CartesianRepresentation instance with the sums of the
x, y, and z components.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("sum")
return self._apply("sum", *args, **kwargs)
def dot(self, other):
"""Dot product of two representations.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of ``self``
and ``other``.
"""
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError(
"can only take dot product with another "
f"representation, not a {type(other)} instance."
) from err
# erfa pdp: p-vector inner (=scalar=dot) product.
return erfa_ufunc.pdp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1))
def cross(self, other):
"""Cross product of two representations.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
cross_product : `~astropy.coordinates.CartesianRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials("cross")
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError(
"cannot only take cross product with another "
f"representation, not a {type(other)} instance."
) from err
# erfa pxp: p-vector outer (=vector=cross) product.
sxo = erfa_ufunc.pxp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1))
return self.__class__(sxo, xyz_axis=-1)
class UnitSphericalRepresentation(BaseRepresentation):
"""
Representation of points on a unit sphere.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle'] or str
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
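Examples
--------
A minimal usage sketch (the Cartesian values quoted in the comment are
approximate and shown only for orientation):
>>> from astropy import units as u
>>> from astropy.coordinates import UnitSphericalRepresentation
>>> usph = UnitSphericalRepresentation(lon=90 * u.deg, lat=0 * u.deg)
>>> cart = usph.to_cartesian()  # dimensionless, roughly (x, y, z) = (0, 1, 0)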
"""
attr_classes = {"lon": Longitude, "lat": Latitude}
@classproperty
def _dimensional_representation(cls):
return SphericalRepresentation
def __init__(self, lon, lat=None, differentials=None, copy=True):
super().__init__(lon, lat, differentials=differentials, copy=copy)
@classproperty
def _compatible_differentials(cls):
return [
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential,
]
# Could let the metaclass define these automatically, but good to have
# a bit clearer docstrings.
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
"lon": CartesianRepresentation(-sinlon, coslon, 0.0, copy=False),
"lat": CartesianRepresentation(
-sinlat * coslon, -sinlat * sinlon, coslat, copy=False
),
}
def scale_factors(self, omit_coslat=False):
sf_lat = np.broadcast_to(1.0 / u.radian, self.shape, subok=True)
sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
return {"lon": sf_lon, "lat": sf_lat}
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# erfa s2c: Convert [unit]spherical coordinates to Cartesian.
p = erfa_ufunc.s2c(self.lon, self.lat)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa c2s: P-vector to [unit]spherical coordinates.
return cls(*erfa_ufunc.c2s(p), copy=False)
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO! for differential_class. This cannot (currently) be implemented
# like in the other Representations since `_re_represent_differentials`
# keeps differentials' unit keys, but this can result in a mismatch
# between the UnitSpherical expected key (e.g. "s") and that expected
# in the other class (here "s / m"). For more info, see PR #11467
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(
phi=self.lon, theta=90 * u.deg - self.lat, r=1.0, copy=False
)
elif issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, distance=1.0, copy=False)
return super().represent_as(other_class, differential_class)
def transform(self, matrix):
r"""Transform the unit-spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
Returns
-------
`UnitSphericalRepresentation` or `SphericalRepresentation`
If ``matrix`` is O(3) -- :math:`M \cdot M^T = I` -- like a rotation,
then the result is a `UnitSphericalRepresentation`.
All other matrices will change the distance, so the dimensional
representation is used instead.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat = erfa_ufunc.c2s(p)
rep = self.__class__(lon=lon, lat=lat)
# handle differentials
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
rep = rep.with_differentials(new_diffs)
else: # switch to dimensional representation
rep = self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1, differentials=self.differentials
).transform(matrix)
return rep
def _scale_operation(self, op, *args):
return self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1.0, differentials=self.differentials
)._scale_operation(op, *args)
def __neg__(self):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super().__neg__()
result = self.__class__(self.lon + 180.0 * u.deg, -self.lat, copy=False)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, operator.neg), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units, which is
always unity for vectors on the unit sphere.
Returns
-------
norm : `~astropy.units.Quantity` ['dimensionless']
Dimensionless ones, with the same shape as the representation.
"""
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled, copy=False)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self._dimensional_representation.from_cartesian(result)
def mean(self, *args, **kwargs):
"""Vector mean.
The representation is converted to cartesian, the means of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("mean")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().mean(*args, **kwargs)
)
def sum(self, *args, **kwargs):
"""Vector sum.
The representation is converted to cartesian, the sums of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("sum")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().sum(*args, **kwargs)
)
def cross(self, other):
"""Cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to `~astropy.coordinates.SphericalRepresentation`.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.SphericalRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials("cross")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().cross(other)
)
class RadialRepresentation(BaseRepresentation):
"""
Representation of the distance of points from the origin.
Note that this is mostly intended as an internal helper representation.
It can do little else but be used as a scale in multiplication.
Parameters
----------
distance : `~astropy.units.Quantity` ['length']
The distance of the point(s) from the origin.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
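Examples
--------
A short sketch of typical use (values chosen so the result is easy to check):
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation, RadialRepresentation
>>> cart = CartesianRepresentation(3 * u.kpc, 4 * u.kpc, 0 * u.kpc)
>>> rad = RadialRepresentation.from_cartesian(cart)  # distance is the norm, 5 kpc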
"""
attr_classes = {"distance": u.Quantity}
def __init__(self, distance, differentials=None, copy=True):
super().__init__(distance, differentials=differentials, copy=copy)
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
"""Cartesian unit vectors are undefined for radial representation."""
raise NotImplementedError(
f"Cartesian unit vectors are undefined for {self.__class__} instances"
)
def scale_factors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"distance": l}
def to_cartesian(self):
"""Cannot convert radial representation to cartesian."""
raise NotImplementedError(
f"cannot convert {self.__class__} instance to cartesian."
)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to radial coordinate.
"""
return cls(distance=cart.norm(), copy=False)
def __mul__(self, other):
if isinstance(other, BaseRepresentation):
return self.distance * other
else:
return super().__mul__(other)
def norm(self):
"""Vector norm.
Just the distance itself.
Returns
-------
norm : `~astropy.units.Quantity`
The distance itself, with the same shape as the representation.
"""
return self.distance
def _combine_operation(self, op, other, reverse=False):
return NotImplemented
def transform(self, matrix):
"""Radial representations cannot be transformed by a Cartesian matrix.
Parameters
----------
matrix : array-like
The transformation matrix in a Cartesian basis.
Must be a multiplication, i.e., a diagonal matrix with identical
elements (a scaled identity). Must have shape (..., 3, 3), where the
last two axes hold the individual 3x3 matrices and any leading axes
broadcast against the shape of this representation.
Raises
------
ValueError
If the matrix is not a multiplication.
"""
scl = matrix[..., 0, 0]
# check that the matrix is a scaled identity matrix on the last 2 axes.
if np.any(matrix != scl[..., np.newaxis, np.newaxis] * np.identity(3)):
raise ValueError(
"Radial representations can only be "
"transformed by a scaled identity matrix"
)
return self * scl
def _spherical_op_funcs(op, *args):
"""For given operator, return functions that adjust lon, lat, distance."""
if op is operator.neg:
return lambda x: x + 180 * u.deg, operator.neg, operator.pos
try:
scale_sign = np.sign(args[0])
except Exception:
# This should always work, even if perhaps we get a negative distance.
return operator.pos, operator.pos, lambda x: op(x, *args)
scale = abs(args[0])
return (
lambda x: x + 180 * u.deg * np.signbit(scale_sign),
lambda x: x * scale_sign,
lambda x: op(x, scale),
)
class SphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle']
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
distance : `~astropy.units.Quantity` ['length']
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
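Examples
--------
A minimal sketch (the values in the comments are approximate):
>>> from astropy import units as u
>>> from astropy.coordinates import SphericalRepresentation
>>> sph = SphericalRepresentation(lon=45 * u.deg, lat=30 * u.deg,
...                               distance=2 * u.kpc)
>>> cart = sph.to_cartesian()  # roughly (1.22, 1.22, 1.0) kpc
>>> back = SphericalRepresentation.from_cartesian(cart)  # ~(45 deg, 30 deg, 2 kpc)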
"""
attr_classes = {"lon": Longitude, "lat": Latitude, "distance": u.Quantity}
_unit_representation = UnitSphericalRepresentation
def __init__(self, lon, lat=None, distance=None, differentials=None, copy=True):
super().__init__(lon, lat, distance, copy=copy, differentials=differentials)
if (
not isinstance(self._distance, Distance)
and self._distance.unit.physical_type == "length"
):
try:
self._distance = Distance(self._distance, copy=False)
except ValueError as e:
if e.args[0].startswith("distance must be >= 0"):
raise ValueError(
"Distance must be >= 0. To allow negative distance values, you"
" must explicitly pass in a `Distance` object with the the "
"argument 'allow_negative=True'."
) from e
else:
raise
@classproperty
def _compatible_differentials(cls):
return [
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential,
]
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
"lon": CartesianRepresentation(-sinlon, coslon, 0.0, copy=False),
"lat": CartesianRepresentation(
-sinlat * coslon, -sinlat * sinlon, coslat, copy=False
),
"distance": CartesianRepresentation(
coslat * coslon, coslat * sinlon, sinlat, copy=False
),
}
def scale_factors(self, omit_coslat=False):
sf_lat = self.distance / u.radian
sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat)
sf_distance = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"lon": sf_lon, "lat": sf_lat, "distance": sf_distance}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if inspect.isclass(other_class):
if issubclass(other_class, PhysicsSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
phi=self.lon,
theta=90 * u.deg - self.lat,
r=self.distance,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.lon, lat=self.lat, differentials=diffs, copy=False
)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.distance, Distance):
d = self.distance.view(u.Quantity)
else:
d = self.distance
# erfa s2p: Convert spherical polar coordinates to p-vector.
p = erfa_ufunc.s2p(self.lon, self.lat, d)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa p2s: P-vector to spherical polar coordinates.
return cls(*erfa_ufunc.p2s(p), copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p)
rep = self.__class__(lon=lon, lat=lat, distance=self.distance * ur)
# handle differentials
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the distance.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.distance)
def _scale_operation(self, op, *args):
# TODO: expand special-casing to UnitSpherical and RadialDifferential.
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
lon_op, lat_op, distance_op = _spherical_op_funcs(op, *args)
result = self.__class__(
lon_op(self.lon), lat_op(self.lat), distance_op(self.distance), copy=False
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, lat_op, distance_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class PhysicsSphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates (using the physics
convention of using ``phi`` and ``theta`` for azimuth and inclination
from the pole).
Parameters
----------
phi, theta : `~astropy.units.Quantity` or str
The azimuth and inclination of the point(s), in angular units. The
inclination should be between 0 and 180 degrees, and the azimuth will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`. If ``copy`` is `False`, ``phi``
will be changed in place if it is not between 0 and 360 degrees.
r : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `PhysicsSphericalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`PhysicsSphericalDifferential` instance, or a dictionary of
differential instances with keys set to a string representation of the
SI unit with which the differential (derivative) is taken. For example,
for a velocity differential on a positional representation, the key
would be ``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
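Examples
--------
A minimal sketch; note that ``theta`` is measured from the positive z axis
(the values in the comment are approximate):
>>> from astropy import units as u
>>> from astropy.coordinates import PhysicsSphericalRepresentation
>>> psph = PhysicsSphericalRepresentation(phi=0 * u.deg, theta=90 * u.deg,
...                                       r=1 * u.kpc)
>>> cart = psph.to_cartesian()  # roughly (1, 0, 0) kpc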
"""
attr_classes = {"phi": Angle, "theta": Angle, "r": u.Quantity}
def __init__(self, phi, theta=None, r=None, differentials=None, copy=True):
super().__init__(phi, theta, r, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
# Note that _phi already holds our own copy if copy=True.
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.0 * u.deg) or np.any(self._theta > 180.0 * u.deg):
raise ValueError(
"Inclination angle(s) must be within 0 deg <= angle <= 180 deg, "
f"got {theta.to(u.degree)}"
)
if self._r.unit.physical_type == "length":
self._r = self._r.view(Distance)
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def theta(self):
"""
The inclination of the point(s), measured from the pole (the positive z axis).
"""
return self._theta
@property
def r(self):
"""
The distance from the origin to the point(s).
"""
return self._r
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
"phi": CartesianRepresentation(-sinphi, cosphi, 0.0, copy=False),
"theta": CartesianRepresentation(
costheta * cosphi, costheta * sinphi, -sintheta, copy=False
),
"r": CartesianRepresentation(
sintheta * cosphi, sintheta * sinphi, costheta, copy=False
),
}
def scale_factors(self):
r = self.r / u.radian
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"phi": r * sintheta, "theta": r, "r": l}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if inspect.isclass(other_class):
if issubclass(other_class, SphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.phi,
lat=90 * u.deg - self.theta,
distance=self.r,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.phi,
lat=90 * u.deg - self.theta,
differentials=diffs,
copy=False,
)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.r, Distance):
d = self.r.view(u.Quantity)
else:
d = self.r
x = d * np.sin(self.theta) * np.cos(self.phi)
y = d * np.sin(self.theta) * np.sin(self.phi)
z = d * np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, r=r, copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
# apply transformation in unit-spherical coordinates
xyz = erfa_ufunc.s2c(self.phi, 90 * u.deg - self.theta)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p) # `ur` is transformed unit-`r`
# create transformed physics-spherical representation,
# reapplying the distance scaling
rep = self.__class__(phi=lon, theta=90 * u.deg - lat, r=self.r * ur)
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the radius.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.r)
def _scale_operation(self, op, *args):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
phi_op, adjust_theta_sign, r_op = _spherical_op_funcs(op, *args)
# Also run phi_op on theta to ensure theta remains between 0 and 180:
# any time the scale is negative, we do -theta + 180 degrees.
result = self.__class__(
phi_op(self.phi),
phi_op(adjust_theta_sign(self.theta)),
r_op(self.r),
copy=False,
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, adjust_theta_sign, r_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class CylindricalRepresentation(BaseRepresentation):
"""
Representation of points in 3D cylindrical coordinates.
Parameters
----------
rho : `~astropy.units.Quantity`
The distance from the z axis to the point(s).
phi : `~astropy.units.Quantity` or str
The azimuth of the point(s), in angular units, which will be wrapped
to an angle between 0 and 360 degrees. This can also be an instance of
`~astropy.coordinates.Angle`.
z : `~astropy.units.Quantity`
The z coordinate(s) of the point(s).
differentials : dict, `CylindricalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`CylindricalDifferential` instance, or a dictionary of differential
instances with keys set to a string representation of the SI unit with
which the differential (derivative) is taken. For example, for a
velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
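Examples
--------
A minimal sketch (the values in the comment are approximate):
>>> from astropy import units as u
>>> from astropy.coordinates import CylindricalRepresentation
>>> cyl = CylindricalRepresentation(rho=1 * u.kpc, phi=90 * u.deg, z=2 * u.kpc)
>>> cart = cyl.to_cartesian()  # roughly (0, 1, 2) kpc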
"""
attr_classes = {"rho": u.Quantity, "phi": Angle, "z": u.Quantity}
def __init__(self, rho, phi=None, z=None, differentials=None, copy=True):
super().__init__(rho, phi, z, copy=copy, differentials=differentials)
if not self._rho.unit.is_equivalent(self._z.unit):
raise u.UnitsError("rho and z should have matching physical types")
@property
def rho(self):
"""
The distance of the point(s) from the z-axis.
"""
return self._rho
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def z(self):
"""
The height of the point(s).
"""
return self._z
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
l = np.broadcast_to(1.0, self.shape)
return {
"rho": CartesianRepresentation(cosphi, sinphi, 0, copy=False),
"phi": CartesianRepresentation(-sinphi, cosphi, 0, copy=False),
"z": CartesianRepresentation(0, 0, l, unit=u.one, copy=False),
}
def scale_factors(self):
rho = self.rho / u.radian
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"rho": l, "phi": rho, "z": l}
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to cylindrical polar
coordinates.
"""
rho = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
z = cart.z
return cls(rho=rho, phi=phi, z=z, copy=False)
def to_cartesian(self):
"""
Converts cylindrical polar coordinates to 3D rectangular cartesian
coordinates.
"""
x = self.rho * np.cos(self.phi)
y = self.rho * np.sin(self.phi)
z = self.z
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
def _scale_operation(self, op, *args):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
phi_op, _, rho_op = _spherical_op_funcs(op, *args)
z_op = lambda x: op(x, *args)
result = self.__class__(
rho_op(self.rho), phi_op(self.phi), z_op(self.z), copy=False
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(rho_op, operator.pos, z_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class BaseDifferential(BaseRepresentationOrDifferential):
r"""A base class representing differentials of representations.
These represent differences or derivatives along each component.
E.g., for physics spherical coordinates, these would be
:math:`\delta r, \delta \theta, \delta \phi`.
Parameters
----------
d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D differentials. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All differential representation classes should subclass this base class,
and define a ``base_representation`` attribute with the class of the
regular `~astropy.coordinates.BaseRepresentation` for which differential
coordinates are provided. This will set up a default ``attr_classes``
instance with names equal to the base component names prefixed by ``d_``,
and all classes set to `~astropy.units.Quantity`, plus properties to access
those, and a default ``__init__`` for initialization.
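As a minimal sketch (the class name below is hypothetical and used only for
illustration), a subclass defined as::
    class DopplerDifferential(BaseDifferential):
        base_representation = RadialRepresentation
would automatically get ``attr_classes = {'d_distance': u.Quantity}`` and a
corresponding ``d_distance`` property.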
"""
def __init_subclass__(cls, **kwargs):
"""Set default ``attr_classes`` and component getters on a Differential.
For these, the components are those of the base representation prefixed
by 'd_', and the class is `~astropy.units.Quantity`.
"""
# Don't do anything for base helper classes.
if cls.__name__ in (
"BaseDifferential",
"BaseSphericalDifferential",
"BaseSphericalCosLatDifferential",
):
return
if not hasattr(cls, "base_representation"):
raise NotImplementedError(
"Differential representations must have a"
'"base_representation" class attribute.'
)
# If not defined explicitly, create attr_classes.
if not hasattr(cls, "attr_classes"):
base_attr_classes = cls.base_representation.attr_classes
cls.attr_classes = {"d_" + c: u.Quantity for c in base_attr_classes}
repr_name = cls.get_name()
if repr_name in DIFFERENTIAL_CLASSES:
raise ValueError(f"Differential class {repr_name} already defined")
DIFFERENTIAL_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# If not defined explicitly, create properties for the components.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(
cls,
component,
property(
_make_getter(component),
doc=f"Component '{component}' of the Differential.",
),
)
super().__init_subclass__(**kwargs)
@classmethod
def _check_base(cls, base):
if cls not in base._compatible_differentials:
raise TypeError(
f"Differential class {cls} is not compatible with the "
f"base (representation) class {base.__class__}"
)
def _get_deriv_key(self, base):
"""Given a base (representation instance), determine the unit of the
derivative by removing the representation unit from the component units
of this differential.
"""
# This check is just a last resort so we don't return a strange unit key
# from accidentally passing in the wrong base.
self._check_base(base)
for name in base.components:
comp = getattr(base, name)
d_comp = getattr(self, f"d_{name}", None)
if d_comp is not None:
d_unit = comp.unit / d_comp.unit
# This is quite a bit faster than using to_system() or going
# through Quantity()
d_unit_si = d_unit.decompose(u.si.bases)
d_unit_si._scale = 1 # remove the scale from the unit
return str(d_unit_si)
else:
raise RuntimeError(
"Invalid representation-differential units! This likely happened "
"because either the representation or the associated differential "
"have non-standard units. Check that the input positional data have "
"positional units, and the input velocity data have velocity units, "
"or are both dimensionless."
)
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors()
def to_cartesian(self, base):
"""Convert the differential to 3D rectangular cartesian coordinates.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
`CartesianDifferential`
This object, converted.
"""
base_e, base_sf = self._get_base_vectors(base)
return functools.reduce(
operator.add,
(
getattr(self, d_c) * base_sf[c] * base_e[c]
for d_c, c in zip(self.components, base.components)
),
)
@classmethod
def from_cartesian(cls, other, base):
"""Convert the differential from 3D rectangular cartesian coordinates to
the desired class.
Parameters
----------
other
The object to convert into this differential.
base : `BaseRepresentation`
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Will be converted to ``cls.base_representation`` if needed.
Returns
-------
`BaseDifferential` subclass instance
A new differential object that is this class' type.
"""
base = base.represent_as(cls.base_representation)
base_e, base_sf = cls._get_base_vectors(base)
return cls(
*(other.dot(e / base_sf[component]) for component, e in base_e.items()),
copy=False,
)
def represent_as(self, other_class, base):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
"""
if other_class is self.__class__:
return self
# The default is to convert via cartesian coordinates.
self_cartesian = self.to_cartesian(base)
if issubclass(other_class, BaseDifferential):
return other_class.from_cartesian(self_cartesian, base)
else:
return other_class.from_cartesian(self_cartesian)
@classmethod
def from_representation(cls, representation, base):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
The representation that should be converted to this class.
base : instance of ``cls.base_representation``
The base relative to which the differentials will be defined. If
the representation is a differential itself, the base will be
converted to its ``base_representation`` to help convert it.
"""
if isinstance(representation, BaseDifferential):
cartesian = representation.to_cartesian(
base.represent_as(representation.base_representation)
)
else:
cartesian = representation.to_cartesian()
return cls.from_cartesian(cartesian, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# route transformation through Cartesian
cdiff = self.represent_as(CartesianDifferential, base=base).transform(matrix)
# move back to original representation
diff = cdiff.represent_as(self.__class__, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
"""Scale all components.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
scaled_base : bool, optional
Whether the base was scaled the same way. This affects whether
differential components should be scaled. For instance, a differential
in longitude should not be scaled if its spherical base is scaled
in radius.
"""
scaled_attrs = [op(getattr(self, c), *args) for c in self.components]
return self.__class__(*scaled_attrs, copy=False)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If ``other`` is a representation,
it will be used as a base for which to evaluate the differential,
and the result is a new representation.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if isinstance(self, type(other)):
first, second = (self, other) if not reverse else (other, self)
return self.__class__(
*[op(getattr(first, c), getattr(second, c)) for c in self.components]
)
else:
try:
self_cartesian = self.to_cartesian(other)
except TypeError:
return NotImplemented
return other._combine_operation(op, self_cartesian, not reverse)
def __sub__(self, other):
# avoid "differential - representation".
if isinstance(other, BaseRepresentation):
return NotImplemented
return super().__sub__(other)
def norm(self, base=None):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Parameters
----------
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. This is
required to calculate the physical size of the differential for
all but Cartesian differentials or radial differentials.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
# RadialDifferential overrides this function, so there is no handling here
if not isinstance(self, CartesianDifferential) and base is None:
raise ValueError(
"`base` must be provided to calculate the norm of a"
f" {type(self).__name__}"
)
return self.to_cartesian(base).norm()
class CartesianDifferential(BaseDifferential):
"""Differentials in of points in 3D cartesian coordinates.
Parameters
----------
d_x, d_y, d_z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the differentials. If ``d_x``, ``d_y``,
and ``d_z`` have different shapes, they should be broadcastable. If not
quantities, ``unit`` should be set. If only ``d_x`` is given, it is
assumed that it contains an array with the 3 coordinates stored along
``xyz_axis``.
unit : `~astropy.units.Unit` or str
If given, the differentials will be converted to this unit (or taken to
be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0).
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
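Examples
--------
A minimal sketch of attaching a velocity differential to a position:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation, CartesianDifferential
>>> d = CartesianDifferential(1, 2, 3, unit=u.km / u.s)
>>> pos = CartesianRepresentation(1 * u.kpc, 0 * u.kpc, 0 * u.kpc,
...                               differentials=d)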
"""
base_representation = CartesianRepresentation
_d_xyz = None
def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None, copy=True):
if d_y is None and d_z is None:
if isinstance(d_x, np.ndarray) and d_x.dtype.kind not in "OV":
# Short-cut for 3-D array input.
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._d_xyz = d_x
if xyz_axis:
d_x = np.moveaxis(d_x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._d_x, self._d_y, self._d_z = d_x
return
else:
d_x, d_y, d_z = d_x
if xyz_axis is not None:
raise ValueError(
"xyz_axis should only be set if d_x, d_y, and d_z are in a single array"
" passed in through d_x, i.e., d_y and d_z should not be not given."
)
if d_y is None or d_z is None:
raise ValueError(
"d_x, d_y, and d_z are required to instantiate"
f" {self.__class__.__name__}"
)
if unit is not None:
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
copy = False
super().__init__(d_x, d_y, d_z, copy=copy)
if not (
self._d_x.unit.is_equivalent(self._d_y.unit)
and self._d_x.unit.is_equivalent(self._d_z.unit)
):
raise u.UnitsError("d_x, d_y and d_z should have equivalent units.")
def to_cartesian(self, base=None):
return CartesianRepresentation(*[getattr(self, c) for c in self.components])
@classmethod
def from_cartesian(cls, other, base=None):
return cls(*[getattr(other, c) for c in other.components])
def transform(self, matrix, base=None, transformed_base=None):
"""Transform differentials using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base, transformed_base : `~astropy.coordinates.CartesianRepresentation` or None, optional
Not used in the Cartesian transformation.
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_d_xyz(xyz_axis=-1))
return self.__class__(p, xyz_axis=-1, copy=False)
def get_d_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
d_xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._d_xyz is not None:
if self._xyz_axis == xyz_axis:
return self._d_xyz
else:
return np.moveaxis(self._d_xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _d_xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._d_x, self._d_y, self._d_z], axis=xyz_axis)
d_xyz = property(get_d_xyz)
class BaseSphericalDifferential(BaseDifferential):
def _d_lon_coslat(self, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon * np.cos(base.lat)
@classmethod
def _get_d_lon(cls, d_lon_coslat, base):
"""Convert longitude differential d_lon_coslat to d_lon.
Parameters
----------
d_lon_coslat : `~astropy.units.Quantity`
Longitude differential that includes ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon_coslat / np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
`~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (
isinstance(other, BaseSphericalDifferential)
and not isinstance(self, type(other))
or isinstance(other, RadialDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The differential longitude and latitude.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = UnitSphericalRepresentation
@classproperty
def _dimensional_differential(cls):
return SphericalDifferential
def __init__(self, d_lon, d_lat=None, copy=True):
super().__init__(d_lon, d_lat, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon and d_lat should have equivalent units.")
@classmethod
def from_cartesian(cls, other, base):
# Go via the dimensional equivalent, so that the longitude and latitude
# differentials correctly take into account the norm of the base.
dimensional = cls._dimensional_differential.from_cartesian(other, base)
return dimensional.represent_as(cls)
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, representation.d_lat)
elif isinstance(
representation,
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential),
):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta)
return super().from_representation(representation, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
# TODO! implement without Cartesian intermediate step.
# some of this can be moved to the parent class.
diff = super().transform(matrix, base, transformed_base)
else: # switch to dimensional representation
du = self.d_lon.unit / base.lon.unit # derivative unit
diff = self._dimensional_differential(
d_lon=self.d_lon, d_lat=self.d_lat, d_distance=0 * du
).transform(matrix, base, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.copy()
else:
return super()._scale_operation(op, *args)
class SphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The differential longitude and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalDifferential
def __init__(self, d_lon, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon, d_lat, d_distance, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon and d_lat should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_lon, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self.d_lon, -self.d_lat, self.d_distance)
else:
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(
representation.d_phi, -representation.d_theta, representation.d_r
)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_lon, self.d_lat, op(self.d_distance, *args))
else:
return super()._scale_operation(op, *args)
class BaseSphericalCosLatDifferential(BaseDifferential):
"""Differentials from points on a spherical base representation.
With cos(lat) assumed to be included in the longitude differential.
"""
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from (unit)spherical base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates. The scale factor for
longitude does not include the cos(lat) factor.
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors(omit_coslat=True)
def _d_lon(self, base):
"""Convert longitude differential with cos(lat) to one without.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon_coslat / np.cos(base.lat)
@classmethod
def _get_d_lon_coslat(cls, d_lon, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
d_lon : `~astropy.units.Quantity`
Value of the longitude differential without ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon * np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
`~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (
isinstance(other, BaseSphericalCosLatDifferential)
and not isinstance(self, type(other))
or isinstance(other, RadialDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalCosLatDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The differential longitude (with cos(lat) included) and latitude.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = UnitSphericalRepresentation
attr_classes = {"d_lon_coslat": u.Quantity, "d_lat": u.Quantity}
@classproperty
def _dimensional_differential(cls):
return SphericalCosLatDifferential
def __init__(self, d_lon_coslat, d_lat=None, copy=True):
super().__init__(d_lon_coslat, d_lat, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon_coslat and d_lat should have equivalent units.")
@classmethod
def from_cartesian(cls, other, base):
# Go via the dimensional equivalent, so that the longitude and latitude
# differentials correctly take into account the norm of the base.
dimensional = cls._dimensional_differential.from_cartesian(other, base)
return dimensional.represent_as(cls)
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though w/o CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
return cls(representation.d_lon_coslat, representation.d_lat)
elif isinstance(
representation, (SphericalDifferential, UnitSphericalDifferential)
):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta)
return super().from_representation(representation, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
# TODO! implement without Cartesian intermediate step.
diff = super().transform(matrix, base, transformed_base)
else: # switch to dimensional representation
du = self.d_lat.unit / base.lat.unit # derivative unit
diff = self._dimensional_differential(
d_lon_coslat=self.d_lon_coslat, d_lat=self.d_lat, d_distance=0 * du
).transform(matrix, base, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.copy()
else:
return super()._scale_operation(op, *args)
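# --- Editorial sketch (not part of astropy; illustrative only) --------------
# The cos(lat) convention used above means d_lon_coslat = d_lon * cos(lat) at
# the base point.  Assuming the public astropy.coordinates exports, converting
# from the plain-longitude differential shows the factor directly.
def _example_coslat_convention():
    import astropy.units as u
    import numpy as np
    from astropy.coordinates import (
        UnitSphericalCosLatDifferential,
        UnitSphericalDifferential,
        UnitSphericalRepresentation,
    )

    base = UnitSphericalRepresentation(10 * u.deg, 60 * u.deg)
    d_plain = UnitSphericalDifferential(1 * u.mas / u.yr, 2 * u.mas / u.yr)
    d_coslat = d_plain.represent_as(UnitSphericalCosLatDifferential, base)
    # cos(60 deg) = 0.5, so the longitude differential is halved
    assert np.isclose(d_coslat.d_lon_coslat.to_value(u.mas / u.yr), 0.5)
    assert d_coslat.d_lat == d_plain.d_lat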
class SphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The differential longitude (with cos(lat) included) and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalCosLatDifferential
attr_classes = {
"d_lon_coslat": u.Quantity,
"d_lat": u.Quantity,
"d_distance": u.Quantity,
}
def __init__(self, d_lon_coslat, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon_coslat and d_lat should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though some need base for the latitude to remove cos(lat).
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self.d_lon_coslat, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalDifferential):
return other_class(self._d_lon(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self._d_lon(base), -self.d_lat, self.d_distance)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat.
if isinstance(representation, SphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta, representation.d_r)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(
self.d_lon_coslat, self.d_lat, op(self.d_distance, *args)
)
else:
return super()._scale_operation(op, *args)
class RadialDifferential(BaseDifferential):
"""Differential(s) of radial distances.
Parameters
----------
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = RadialRepresentation
def to_cartesian(self, base):
unit_vec = base.represent_as(UnitSphericalRepresentation).to_cartesian()
return self.d_distance * unit_vec
def norm(self, base=None):
return self.d_distance
@classmethod
def from_cartesian(cls, other, base):
return cls(
other.dot(base.represent_as(UnitSphericalRepresentation)), copy=False
)
@classmethod
def from_representation(cls, representation, base=None):
if isinstance(
representation, (SphericalDifferential, SphericalCosLatDifferential)
):
return cls(representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_r)
else:
return super().from_representation(representation, base)
def _combine_operation(self, op, other, reverse=False):
if isinstance(other, self.base_representation):
if reverse:
first, second = other.distance, self.d_distance
else:
first, second = self.d_distance, other.distance
return other.__class__(op(first, second), copy=False)
elif isinstance(
other, (BaseSphericalDifferential, BaseSphericalCosLatDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalDifferential(**result_args)
else:
return super()._combine_operation(op, other, reverse)
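# --- Editorial sketch (not part of astropy; illustrative only) --------------
# As described in _combine_operation above, adding a RadialDifferential to a
# proper-motion-only differential fills in the missing component and yields a
# full SphericalCosLatDifferential (assuming the public exports).
def _example_combine_differentials():
    import astropy.units as u
    from astropy.coordinates import (
        RadialDifferential,
        SphericalCosLatDifferential,
        UnitSphericalCosLatDifferential,
    )

    pm = UnitSphericalCosLatDifferential(1 * u.mas / u.yr, 2 * u.mas / u.yr)
    rv = RadialDifferential(30 * u.km / u.s)
    full = pm + rv
    assert isinstance(full, SphericalCosLatDifferential)
    assert full.d_lon_coslat == pm.d_lon_coslat
    assert full.d_distance == rv.d_distance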
class PhysicsSphericalDifferential(BaseDifferential):
"""Differential(s) of 3D spherical coordinates using physics convention.
Parameters
----------
d_phi, d_theta : `~astropy.units.Quantity`
The differential azimuth and inclination.
d_r : `~astropy.units.Quantity`
The differential radial distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = PhysicsSphericalRepresentation
def __init__(self, d_phi, d_theta=None, d_r=None, copy=True):
super().__init__(d_phi, d_theta, d_r, copy=copy)
if not self._d_phi.unit.is_equivalent(self._d_theta.unit):
raise u.UnitsError("d_phi and d_theta should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude. For those, explicitly
# do the equivalent of self._d_lon_coslat in SphericalDifferential.
if issubclass(other_class, SphericalDifferential):
return other_class(self.d_phi, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_phi, -self.d_theta)
elif issubclass(other_class, SphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_r)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat. For that case,
# do the equivalent of cls._d_lon in SphericalDifferential.
if isinstance(representation, SphericalDifferential):
return cls(
representation.d_lon, -representation.d_lat, representation.d_distance
)
elif isinstance(representation, SphericalCosLatDifferential):
cls._check_base(base)
d_phi = representation.d_lon_coslat / np.sin(base.theta)
return cls(d_phi, -representation.d_lat, representation.d_distance)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_phi, self.d_theta, op(self.d_r, *args))
else:
return super()._scale_operation(op, *args)
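# --- Editorial sketch (not part of astropy; illustrative only) --------------
# The physics convention measures the inclination angle from the pole, so the
# latitude-type differential flips sign when converting to the astronomical
# convention, exactly as represent_as above does (assuming the public exports).
def _example_physics_convention():
    import astropy.units as u
    from astropy.coordinates import (
        PhysicsSphericalDifferential,
        SphericalDifferential,
    )

    d_phys = PhysicsSphericalDifferential(
        1 * u.mas / u.yr, 2 * u.mas / u.yr, 3 * u.km / u.s
    )
    d_sph = d_phys.represent_as(SphericalDifferential)
    assert d_sph.d_lon == d_phys.d_phi
    assert d_sph.d_lat == -d_phys.d_theta
    assert d_sph.d_distance == d_phys.d_r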
class CylindricalDifferential(BaseDifferential):
"""Differential(s) of points in cylindrical coordinates.
Parameters
----------
d_rho : `~astropy.units.Quantity` ['speed']
The differential cylindrical radius.
d_phi : `~astropy.units.Quantity` ['angular speed']
The differential azimuth.
d_z : `~astropy.units.Quantity` ['speed']
The differential height.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = CylindricalRepresentation
def __init__(self, d_rho, d_phi=None, d_z=None, copy=True):
super().__init__(d_rho, d_phi, d_z, copy=copy)
if not self._d_rho.unit.is_equivalent(self._d_z.unit):
raise u.UnitsError("d_rho and d_z should have equivalent units.")
|
34f1cb182218a3aa3ca92a1fec55e6bc8e00a9fef3f009e34f89f0c7cca60c93 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains utility functions for working with angles. These are both
used internally in astropy.coordinates.angles, and of possible interest to external users.
"""
__all__ = [
"angular_separation",
"position_angle",
"offset_by",
"golden_spiral_grid",
"uniform_spherical_random_surface",
"uniform_spherical_random_volume",
]
# Third-party
import numpy as np
# Astropy
import astropy.units as u
from astropy.coordinates.representation import (
SphericalRepresentation,
UnitSphericalRepresentation,
)
_TWOPI = 2 * np.pi
def angular_separation(lon1, lat1, lon2, lat2):
"""
Angular separation between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
angular separation : `~astropy.units.Quantity` ['angle'] or float
Type depends on input; ``Quantity`` in angular units, or float in
radians.
Notes
-----
The angular separation is calculated using the Vincenty formula [1]_,
which is slightly more complex and computationally expensive than
some alternatives, but is stable at all distances, including the
poles and antipodes.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
sdlon = np.sin(lon2 - lon1)
cdlon = np.cos(lon2 - lon1)
slat1 = np.sin(lat1)
slat2 = np.sin(lat2)
clat1 = np.cos(lat1)
clat2 = np.cos(lat2)
num1 = clat2 * sdlon
num2 = clat1 * slat2 - slat1 * clat2 * cdlon
denominator = slat1 * slat2 + clat1 * clat2 * cdlon
return np.arctan2(np.hypot(num1, num2), denominator)
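# --- Editorial sketch (not part of astropy; illustrative only) --------------
# A quick check of angular_separation: the Vincenty form stays accurate even
# for antipodal points, and plain floats are interpreted as radians.
def _example_angular_separation():
    import astropy.units as u
    import numpy as np

    sep = angular_separation(0 * u.deg, 90 * u.deg, 0 * u.deg, -90 * u.deg)
    assert np.isclose(sep.to_value(u.deg), 180.0)  # pole to pole
    assert np.isclose(angular_separation(0.0, 0.0, np.pi / 2, 0.0), np.pi / 2)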
def position_angle(lon1, lat1, lon2, lat2):
"""
Position Angle (East of North) between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from position 1 to
position 2. If any of the angles are arrays, this will contain an array
following the appropriate `numpy` broadcasting rules.
"""
from .angles import Angle
deltalon = lon2 - lon1
colat = np.cos(lat2)
x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon)
y = np.sin(deltalon) * colat
return Angle(np.arctan2(y, x), u.radian).wrap_at(360 * u.deg)
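# --- Editorial sketch (not part of astropy; illustrative only) --------------
# position_angle follows the East-of-North convention: a target due north of
# the reference point has PA = 0 deg, and a target due east has PA = 90 deg.
def _example_position_angle():
    import astropy.units as u
    import numpy as np

    pa_north = position_angle(10 * u.deg, 20 * u.deg, 10 * u.deg, 21 * u.deg)
    pa_east = position_angle(10 * u.deg, 0 * u.deg, 11 * u.deg, 0 * u.deg)
    assert np.isclose(pa_north.to_value(u.deg), 0.0)
    assert np.isclose(pa_east.to_value(u.deg), 90.0)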
def offset_by(lon, lat, posang, distance):
"""
Point with the given offset from the given point.
Parameters
----------
lon, lat, posang, distance : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the starting point,
position angle and distance to the final point.
Quantities should be in angular units; floats in radians.
Polar points at lat= +/-90 are treated as limit of +/-(90-epsilon) and same lon.
Returns
-------
lon, lat : `~astropy.coordinates.Angle`
The position of the final point. If any of the angles are arrays,
these will contain arrays following the appropriate `numpy` broadcasting rules.
0 <= lon < 2pi.
"""
from .angles import Angle
# Calculations are done using the spherical trigonometry sine and cosine rules
# of the triangle A at North Pole, B at starting point, C at final point
# with angles A (change in lon), B (posang), C (not used, but negative reciprocal posang)
# with sides a (distance), b (final co-latitude), c (starting colatitude)
# B, a, c are knowns; A and b are unknowns
# https://en.wikipedia.org/wiki/Spherical_trigonometry
cos_a = np.cos(distance)
sin_a = np.sin(distance)
cos_c = np.sin(lat)
sin_c = np.cos(lat)
cos_B = np.cos(posang)
sin_B = np.sin(posang)
# cosine rule: Know two sides: a,c and included angle: B; get unknown side b
cos_b = cos_c * cos_a + sin_c * sin_a * cos_B
# sin_b = np.sqrt(1 - cos_b**2)
# sine rule and cosine rule for A (using both lets arctan2 pick quadrant).
# multiplying both sin_A and cos_A by x=sin_b * sin_c prevents /0 errors
# at poles. Correct for the x=0 multiplication a few lines down.
# sin_A/sin_a == sin_B/sin_b # Sine rule
xsin_A = sin_a * sin_B * sin_c
# cos_a == cos_b * cos_c + sin_b * sin_c * cos_A # cosine rule
xcos_A = cos_a - cos_b * cos_c
A = Angle(np.arctan2(xsin_A, xcos_A), u.radian)
# Treat the poles as if they are infinitesimally far from pole but at given lon
small_sin_c = sin_c < 1e-12
if small_sin_c.any():
# For south pole (cos_c = -1), A = posang; for North pole, A=180 deg - posang
A_pole = (90 * u.deg + cos_c * (90 * u.deg - Angle(posang, u.radian))).to(u.rad)
if A.shape:
# broadcast to ensure the shape is like that of A, which is also
# affected by the (possible) shapes of lat, posang, and distance.
small_sin_c = np.broadcast_to(small_sin_c, A.shape)
A[small_sin_c] = A_pole[small_sin_c]
else:
A = A_pole
outlon = (Angle(lon, u.radian) + A).wrap_at(360.0 * u.deg).to(u.deg)
outlat = Angle(np.arcsin(cos_b), u.radian).to(u.deg)
return outlon, outlat
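# --- Editorial sketch (not part of astropy; illustrative only) --------------
# offset_by moving due north (position angle 0) from a point on the equator
# simply increases the latitude by the requested distance.
def _example_offset_by():
    import astropy.units as u
    import numpy as np

    lon, lat = offset_by(30 * u.deg, 0 * u.deg, posang=0 * u.deg, distance=10 * u.deg)
    assert np.isclose(lon.to_value(u.deg), 30.0)
    assert np.isclose(lat.to_value(u.deg), 10.0)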
def golden_spiral_grid(size):
"""Generate a grid of points on the surface of the unit sphere using the
Fibonacci or Golden Spiral method.
.. seealso::
`Evenly distributing points on a sphere <https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere>`_
Parameters
----------
size : int
The number of points to generate.
Returns
-------
rep : `~astropy.coordinates.UnitSphericalRepresentation`
The grid of points.
"""
golden_r = (1 + 5**0.5) / 2
grid = np.arange(0, size, dtype=float) + 0.5
lon = _TWOPI / golden_r * grid * u.rad
lat = np.arcsin(1 - 2 * grid / size) * u.rad
return UnitSphericalRepresentation(lon, lat)
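# --- Editorial sketch (not part of astropy; illustrative only) --------------
# golden_spiral_grid returns a single representation object holding all the
# requested points; every point lies on the unit sphere.
def _example_golden_spiral_grid():
    import numpy as np

    rep = golden_spiral_grid(100)
    assert rep.shape == (100,)
    assert np.allclose(rep.to_cartesian().norm().value, 1.0)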
def uniform_spherical_random_surface(size=1):
"""Generate a random sampling of points on the surface of the unit sphere.
Parameters
----------
size : int
The number of points to generate.
Returns
-------
rep : `~astropy.coordinates.UnitSphericalRepresentation`
The random points.
"""
rng = np.random # can maybe switch to this being an input later - see #11628
lon = rng.uniform(0, _TWOPI, size) * u.rad
lat = np.arcsin(rng.uniform(-1, 1, size=size)) * u.rad
return UnitSphericalRepresentation(lon, lat)
def uniform_spherical_random_volume(size=1, max_radius=1):
"""Generate a random sampling of points that follow a uniform volume
density distribution within a sphere.
Parameters
----------
size : int
The number of points to generate.
max_radius : number, quantity-like, optional
A dimensionless or unit-ful factor to scale the random distances.
Returns
-------
rep : `~astropy.coordinates.SphericalRepresentation`
The random points.
"""
rng = np.random # can maybe switch to this being an input later - see #11628
usph = uniform_spherical_random_surface(size=size)
r = np.cbrt(rng.uniform(size=size)) * u.Quantity(max_radius, copy=False)
return SphericalRepresentation(usph.lon, usph.lat, r)
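# --- Editorial sketch (not part of astropy; illustrative only) --------------
# uniform_spherical_random_volume with a unit-ful max_radius returns points
# whose distances carry that unit and never exceed the requested maximum.
def _example_uniform_volume():
    import astropy.units as u
    import numpy as np

    rep = uniform_spherical_random_volume(size=500, max_radius=2 * u.kpc)
    assert rep.shape == (500,)
    assert np.all(rep.distance <= 2 * u.kpc)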
from astropy.coordinates import angle_formats
# below here can be deleted in v5.0
from astropy.utils.decorators import deprecated
__old_angle_utilities_funcs = [
"check_hms_ranges",
"degrees_to_dms",
"degrees_to_string",
"dms_to_degrees",
"format_exception",
"hms_to_degrees",
"hms_to_dms",
"hms_to_hours",
"hms_to_radians",
"hours_to_decimal",
"hours_to_hms",
"hours_to_radians",
"hours_to_string",
"parse_angle",
"radians_to_degrees",
"radians_to_dms",
"radians_to_hms",
"radians_to_hours",
"sexagesimal_to_string",
]
for funcname in __old_angle_utilities_funcs:
vars()[funcname] = deprecated(
name="astropy.coordinates.angle_utilities." + funcname,
alternative="astropy.coordinates.angle_formats." + funcname,
since="v4.3",
)(getattr(angle_formats, funcname))
|
2f90d8459710938d3d3e9dac2b986bbed8d77f7d361f0128da9faffb62c1233f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
import heapq
import inspect
import subprocess
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import contextmanager, suppress
from inspect import signature
from warnings import warn
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"TransformGraph",
"CoordinateTransform",
"FunctionTransform",
"BaseAffineTransform",
"AffineTransform",
"StaticMatrixTransform",
"DynamicMatrixTransform",
"FunctionTransformWithFiniteDifference",
"CompositeTransform",
]
def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in this
`TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result
def frame_comps_from_set(frame_set):
"""
A `set` of all component names ever defined within any frame class in
this `TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result
class TransformGraph:
"""
A graph representing the paths between coordinate frames.
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache() # generates cache entries
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, "name", None)
if nm is not None:
if not isinstance(nm, list):
nm = [nm]
for name in nm:
dct[name] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this `TransformGraph`.
"""
if self._cached_frame_set is None:
self._cached_frame_set = set()
for a in self._graph:
self._cached_frame_set.add(a)
for b in self._graph[a]:
self._cached_frame_set.add(b)
return self._cached_frame_set.copy()
@property
def frame_attributes(self):
"""
A `dict` of all the attributes of all frame classes in this
`TransformGraph`.
"""
if self._cached_frame_attributes is None:
self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)
return self._cached_frame_attributes
@property
def frame_component_names(self):
"""
A `set` of all component names every defined within any frame class in
this `TransformGraph`.
"""
if self._cached_component_names is None:
self._cached_component_names = frame_comps_from_set(self.frame_set)
return self._cached_component_names
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
weights on transforms are modified inplace.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._cached_frame_attributes = None
self._cached_component_names = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""
Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : `CoordinateTransform`
The transformation object. Typically a `CoordinateTransform` object,
although it may be some other callable that is called with the same
signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
"""
if not inspect.isclass(fromsys):
raise TypeError("fromsys must be a class")
if not inspect.isclass(tosys):
raise TypeError("tosys must be a class")
if not callable(transform):
raise TypeError("transform must be callable")
frame_set = self.frame_set.copy()
frame_set.add(fromsys)
frame_set.add(tosys)
# Now we check to see if any attributes on the proposed frames override
# *any* component names, which we can't allow for some of the logic in
# the SkyCoord initializer to work
attrs = set(frame_attrs_from_set(frame_set).keys())
comps = frame_comps_from_set(frame_set)
invalid_attrs = attrs.intersection(comps)
if invalid_attrs:
invalid_frames = set()
for attr in invalid_attrs:
if attr in fromsys.frame_attributes:
invalid_frames.update([fromsys])
if attr in tosys.frame_attributes:
invalid_frames.update([tosys])
raise ValueError(
f"Frame(s) {list(invalid_frames)} contain invalid attribute names:"
f" {invalid_attrs}\nFrame attributes can not conflict with *any* of"
" the frame data component names (see"
" `frame_transform_graph.frame_component_names`)."
)
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or None
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or None
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or None
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
raise ValueError("fromsys and tosys must both be None if either are")
if transform is None:
raise ValueError("cannot give all Nones to remove_transform")
# search for the requested transform by brute force and remove it
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
if agraph[b] is transform:
del agraph[b]
fromsys = a
break
# If the transform was found, need to break out of the outer for loop too
if fromsys:
break
else:
raise ValueError(f"Could not find transform {transform} in the graph")
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError(
f"Current transform from {fromsys} to {tosys} is not"
f" {transform}"
)
# Remove the subgraph if it is now empty
if self._graph[fromsys] == {}:
self._graph.pop(fromsys)
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of class or None
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : float or int
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
"""
inf = float("inf")
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, "priority") else 1)
# otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(getattr(agraph[b], "priority", 1))
# entries in q are [distance, count, nodeobj, pathlist]
# count is needed because in py 3.x, tie-breaking fails on the nodes.
# this way, insertion order is preserved if the weights are the same
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
if n2 not in result:  # n2 has not been finalized yet
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError(
"n2 not in heap - this should be impossible!"
)
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
def get_transform(self, fromsys, tosys):
"""
Generates and returns the `CompositeTransform` for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `CompositeTransform` or None
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
This function always returns a `CompositeTransform`, because
`CompositeTransform` is slightly more adaptable in the way it can be
called than other transform classes. Specifically, it takes care of
intermediate steps of transformations in a way that is consistent with
1-hop transformations.
"""
if not inspect.isclass(fromsys):
raise TypeError("fromsys is not a class")
if not inspect.isclass(tosys):
raise TypeError("tosys is not a class")
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(
transforms, fromsys, tosys, register_graph=False
)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
`BaseCoordinateFrame` subclass
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available transform names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(self._cached_names.keys())
def to_dot_graph(
self,
priorities=True,
addnodes=[],
savefn=None,
savelayout="plain",
saveformat=None,
color_edges=True,
):
"""
Converts this transform graph to the graphviz_ DOT format.
Optionally saves it (requires `graphviz`_ be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : None or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
The graphviz output format. (e.g. the ``-Txxx`` option for
the command line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
color_edges : bool
Color the edges between two nodes (frames) based on the type of
transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
blue, ``DynamicMatrixTransform``: green.
Returns
-------
dotgraph : str
A string with the DOT format graph.
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = {
f: [k for k, v in self._cached_names.items() if v == f]
for f in self.frame_set
}
for n in nodes:
if n in invclsaliases:
aliases = "`\\n`".join(invclsaliases[n])
nodenames.append(
'{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, aliases)
)
else:
nodenames.append(n.__name__ + "[ shape=oval ]")
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__] if color_edges else "black"
edgenames.append((a.__name__, b.__name__, pri, color))
# generate simple dot format graph
lines = ["digraph AstropyCoordinateTransformGraph {"]
lines.append("graph [rankdir=LR]")
lines.append("; ".join(nodenames) + ";")
for enm1, enm2, weights, color in edgenames:
labelstr_fmt = "[ {0} {1} ]"
if priorities:
priority_part = f'label = "{weights}"'
else:
priority_part = ""
color_part = f'color = "{color}"'
labelstr = labelstr_fmt.format(priority_part, color_part)
lines.append(f"{enm1} -> {enm2}{labelstr};")
lines.append("")
lines.append("overlap=false")
lines.append("}")
dotgraph = "\n".join(lines)
if savefn is not None:
if savelayout == "plain":
with open(savefn, "w") as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append("-T" + saveformat)
proc = subprocess.Popen(
    args,
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    # use text-mode pipes so the DOT source (a str) can be fed to
    # ``communicate`` and the output written back out as text
    encoding="utf-8",
)
stdout, stderr = proc.communicate(dotgraph)
if proc.returncode != 0:
raise OSError("problem running graphviz: \n" + stderr)
with open(savefn, "w") as f:
f.write(stdout)
return dotgraph
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <https://networkx.github.io/>`_
package installed for this to work.
Returns
-------
nxgraph : ``networkx.Graph``
This `TransformGraph` as a `networkx.Graph <https://networkx.github.io/documentation/stable/reference/classes/graph.html>`_.
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__]
nxgraph.add_edge(a, b, weight=pri, color=color)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):
"""
A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Additional keyword arguments are passed into the ``transcls``
constructor.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third
are ``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use `add_transform` instead of
using this decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(
func, fromsys, tosys, priority=priority, register_graph=self, **kwargs
)
return func
return deco
def _add_merged_transform(self, fromsys, tosys, *furthersys, priority=1):
"""
Add a single-step transform that encapsulates a multi-step transformation path,
using the transforms that already exist in the graph.
The created transform internally calls the existing transforms. If all of the
transforms are affine, the merged transform is
`~astropy.coordinates.transformations.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.transformations.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`.
This method is primarily useful for defining loopback transformations
(i.e., where ``fromsys`` and the final ``tosys`` are the same).
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform to.
*furthersys : class
Additional coordinate frame classes to transform to in order.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Notes
-----
Even though the created transform is a single step in the graph, it
will still internally call the constituent transforms. Thus, there is
no performance benefit for using this created transform.
For Astropy's built-in frames, loopback transformations typically use
`~astropy.coordinates.ICRS` to be safe. Transforming through an inertial
frame ensures that changes in observation time and observer
location/velocity are properly accounted for.
An error will be raised if a direct transform between ``fromsys`` and
``tosys`` already exists.
"""
frames = [fromsys, tosys, *furthersys]
lastsys = frames[-1]
full_path = self.get_transform(fromsys, lastsys)
transforms = [
self.get_transform(frame_a, frame_b)
for frame_a, frame_b in zip(frames[:-1], frames[1:])
]
if None in transforms:
raise ValueError("This transformation path is not possible")
if len(full_path.transforms) == 1:
raise ValueError(
f"A direct transform for {fromsys.__name__}->{lastsys.__name__} already"
" exists"
)
self.add_transform(
fromsys,
lastsys,
CompositeTransform(
transforms, fromsys, lastsys, priority=priority
)._as_single_transform(),
)
@contextmanager
def impose_finite_difference_dt(self, dt):
"""
Context manager to impose a finite-difference time step on all applicable transformations
For each transformation in this transformation graph that has the attribute
``finite_difference_dt``, that attribute is set to the provided value. The only standard
transformation with this attribute is
`~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`.
Parameters
----------
dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the finite difference.
If a callable, should accept ``(fromcoord, toframe)`` and return the ``dt`` value.
"""
key = "finite_difference_dt"
saved_settings = []
try:
for to_frames in self._graph.values():
for transform in to_frames.values():
if hasattr(transform, key):
old_setting = (transform, key, getattr(transform, key))
saved_settings.append(old_setting)
setattr(transform, key, dt)
yield
finally:
for setting in saved_settings:
setattr(*setting)
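# --- Editorial sketch (not part of astropy; illustrative only) --------------
# The frame_transform_graph built by astropy.coordinates is an instance of the
# class above, so the graph API can be exercised on it directly (this assumes
# the standard built-in frames are registered, as they are on import).
def _example_transform_graph():
    from astropy.coordinates import ICRS, Galactic, frame_transform_graph

    assert frame_transform_graph.lookup_name("icrs") is ICRS
    path, distance = frame_transform_graph.find_shortest_path(ICRS, Galactic)
    assert path[0] is ICRS and path[-1] is Galactic
    assert distance < float("inf")
    # get_transform wraps the same path in a CompositeTransform
    assert frame_transform_graph.get_transform(ICRS, Galactic) is not None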
# <-------------------Define the builtin transform classes-------------------->
class CoordinateTransform(metaclass=ABCMeta):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to start from.
tosys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not inspect.isclass(fromsys):
raise TypeError("fromsys must be a class")
if not inspect.isclass(tosys):
raise TypeError("tosys must be a class")
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not inspect.isclass(fromsys) or not inspect.isclass(tosys):
raise TypeError("fromsys and tosys must be classes")
self.overlapping_frame_attr_names = overlap = []
if hasattr(fromsys, "frame_attributes") and hasattr(tosys, "frame_attributes"):
# the if statement is there so that non-frame things might be usable
# if it makes sense
for from_nm in fromsys.frame_attributes:
if from_nm in tosys.frame_attributes:
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : `TransformGraph` object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary ``tosys.frame_attributes``.
Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : `BaseCoordinateFrame` subclass instance
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not callable(func):
raise TypeError("func must be callable")
with suppress(TypeError):
    sig = signature(func)
    kinds = [x.kind for x in sig.parameters.values()]
    # count parameters that can be filled positionally; the kind constants
    # live on inspect.Parameter, not on the Signature object
    n_positional = sum(
        kind
        in (
            inspect.Parameter.POSITIONAL_ONLY,
            inspect.Parameter.POSITIONAL_OR_KEYWORD,
        )
        for kind in kinds
    )
    if n_positional != 2 and inspect.Parameter.VAR_POSITIONAL not in kinds:
        raise ValueError("provided function does not accept two arguments")
self.func = func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError(
f"the transformation function yielded {res} but "
f"should have been of type {self.tosys}"
)
if fromcoord.data.differentials and not res.data.differentials:
warn(
"Applied a FunctionTransform to a coordinate frame with "
"differentials, but the FunctionTransform does not handle "
"differentials, so they have been dropped.",
AstropyWarning,
)
return res
class FunctionTransformWithFiniteDifference(FunctionTransform):
r"""
A coordinate transformation that works like a `FunctionTransform`, but
computes velocity shifts based on the finite-difference relative to one of
the frame attributes. Note that the transform function should *not* change
the differential at all in this case, as any differentials will be
overridden.
When a differential is in the from coordinate, the finite difference
calculation has two components. The first part is simply the existing
differential, but re-oriented (using finite-difference techniques) to
point in the direction the velocity vector has in the *new* frame. The
second component is the "induced" velocity. That is, the velocity
intrinsic to the frame itself, estimated by shifting the frame using the
``finite_difference_frameattr_name`` frame attribute a small amount
(``finite_difference_dt``) in time and re-calculating the position.
Parameters
----------
finite_difference_frameattr_name : str or None
The name of the frame attribute on the frames to use for the finite
difference. Both the to and the from frame will be checked for this
attribute, but only one needs to have it. If None, no velocity
component induced from the frame itself will be included - only the
re-orientation of any existing differential.
finite_difference_dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the
finite difference. If a callable, should accept
``(fromcoord, toframe)`` and return the ``dt`` value.
symmetric_finite_difference : bool
If True, the finite difference is computed as
:math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The latter
case has slightly better performance (and more stable finite difference
behavior).
All other parameters are identical to the initializer for
`FunctionTransform`.
"""
def __init__(
self,
func,
fromsys,
tosys,
priority=1,
register_graph=None,
finite_difference_frameattr_name="obstime",
finite_difference_dt=1 * u.second,
symmetric_finite_difference=True,
):
super().__init__(func, fromsys, tosys, priority, register_graph)
self.finite_difference_frameattr_name = finite_difference_frameattr_name
self.finite_difference_dt = finite_difference_dt
self.symmetric_finite_difference = symmetric_finite_difference
@property
def finite_difference_frameattr_name(self):
return self._finite_difference_frameattr_name
@finite_difference_frameattr_name.setter
def finite_difference_frameattr_name(self, value):
if value is None:
self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
else:
diff_attr_in_fromsys = value in self.fromsys.frame_attributes
diff_attr_in_tosys = value in self.tosys.frame_attributes
if diff_attr_in_fromsys or diff_attr_in_tosys:
self._diff_attr_in_fromsys = diff_attr_in_fromsys
self._diff_attr_in_tosys = diff_attr_in_tosys
else:
raise ValueError(
f"Frame attribute name {value} is not a frame attribute of"
f" {self.fromsys} or {self.tosys}"
)
self._finite_difference_frameattr_name = value
def __call__(self, fromcoord, toframe):
from .representation import CartesianDifferential, CartesianRepresentation
supcall = self.func
if fromcoord.data.differentials:
# this is the finite difference case
if callable(self.finite_difference_dt):
dt = self.finite_difference_dt(fromcoord, toframe)
else:
dt = self.finite_difference_dt
halfdt = dt / 2
from_diffless = fromcoord.realize_frame(
fromcoord.data.without_differentials()
)
reprwithoutdiff = supcall(from_diffless, toframe)
# first we use the existing differential to compute an offset due to
# the already-existing velocity, but in the new frame
fromcoord_cart = fromcoord.cartesian
if self.symmetric_finite_difference:
fwdxyz = (
fromcoord_cart.xyz
+ fromcoord_cart.differentials["s"].d_xyz * halfdt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
backxyz = (
fromcoord_cart.xyz
- fromcoord_cart.differentials["s"].d_xyz * halfdt
)
back = supcall(
fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe
)
else:
fwdxyz = (
fromcoord_cart.xyz + fromcoord_cart.differentials["s"].d_xyz * dt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
back = reprwithoutdiff
diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
# now we compute the "induced" velocities due to any movement in
# the frame itself over time
attrname = self.finite_difference_frameattr_name
if attrname is not None:
if self.symmetric_finite_difference:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + halfdt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + halfdt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) - halfdt}
from_diffless_back = from_diffless.replicate(**kws)
else:
from_diffless_back = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) - halfdt}
back_frame = toframe.replicate_without_data(**kws)
else:
back_frame = toframe
back = supcall(from_diffless_back, back_frame)
else:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + dt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + dt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
back = reprwithoutdiff
diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
newdiff = CartesianDifferential(diffxyz)
reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(
newdiff
)
return reprwithoutdiff.realize_frame(reprwithdiff)
else:
return supcall(fromcoord, toframe)
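# --- Editorial sketch (not part of astropy; illustrative only) --------------
# Plain-numpy illustration of the two finite-difference forms described in the
# docstring above: the symmetric (central) difference is more accurate per
# step, while the one-sided form saves one evaluation of the transform.
def _example_finite_difference_forms():
    import numpy as np

    f, t, dt = np.sin, 1.0, 1e-3
    central = (f(t + dt / 2) - f(t - dt / 2)) / dt
    forward = (f(t + dt) - f(t)) / dt
    exact = np.cos(t)
    assert abs(central - exact) < abs(forward - exact)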
class BaseAffineTransform(CoordinateTransform):
"""Base class for common functionality between the ``AffineTransform``-type
subclasses.
This base class is needed because ``AffineTransform`` and the matrix
transform classes share the ``__call__()`` method, but differ in how they
generate the affine parameters. ``StaticMatrixTransform`` passes in a
matrix stored as a class attribute, and both of the matrix transforms pass
in ``None`` for the offset. Hence, user subclasses would likely want to
subclass this (rather than ``AffineTransform``) if they want to provide
alternative transformations using this machinery.
"""
def _apply_transform(self, fromcoord, matrix, offset):
from .representation import (
CartesianDifferential,
RadialDifferential,
SphericalCosLatDifferential,
SphericalDifferential,
UnitSphericalRepresentation,
)
data = fromcoord.data
has_velocity = "s" in data.differentials
# Bail out if no transform is actually requested
if matrix is None and offset is None:
return data
# list of unit differentials
_unit_diffs = (
SphericalDifferential._unit_differential,
SphericalCosLatDifferential._unit_differential,
)
unit_vel_diff = has_velocity and isinstance(
data.differentials["s"], _unit_diffs
)
rad_vel_diff = has_velocity and isinstance(
data.differentials["s"], RadialDifferential
)
# Some initial checking to short-circuit doing any re-representation if
# we're going to fail anyways:
if isinstance(data, UnitSphericalRepresentation) and offset is not None:
raise TypeError(
"Position information stored on coordinate frame "
"is insufficient to do a full-space position "
"transformation (representation class: {data.__class__})"
)
elif (
has_velocity
and (unit_vel_diff or rad_vel_diff)
and offset is not None
and "s" in offset.differentials
):
# Coordinate has a velocity, but it is not a full-space velocity
# that we need to do a velocity offset
raise TypeError(
"Velocity information stored on coordinate frame is insufficient to do"
" a full-space velocity transformation (differential class:"
f" {data.differentials['s'].__class__})"
)
elif len(data.differentials) > 1:
# We should never get here because the frame initializer shouldn't
# allow more differentials, but this just adds protection for
# subclasses that somehow skip the checks
raise ValueError(
"Representation passed to AffineTransform contains multiple associated"
" differentials. Only a single differential with velocity units is"
f" presently supported (differentials: {data.differentials})."
)
# If the representation is a UnitSphericalRepresentation, and this is
# just a MatrixTransform, we have to try to turn the differential into a
# Unit version of the differential (if no radial velocity) or a
# sphericaldifferential with zero proper motion (if only a radial
# velocity) so that the matrix operation works
if (
has_velocity
and isinstance(data, UnitSphericalRepresentation)
and not unit_vel_diff
and not rad_vel_diff
):
# retrieve just velocity differential
unit_diff = data.differentials["s"].represent_as(
data.differentials["s"]._unit_differential, data
)
data = data.with_differentials({"s": unit_diff}) # updates key
# If it's a RadialDifferential, we flat-out ignore the differentials
# This is because, by this point (past the validation above), we can
# only possibly be doing a rotation-only transformation, and that
# won't change the radial differential. We later add it back in
elif rad_vel_diff:
data = data.without_differentials()
# Convert the representation and differentials to cartesian without
# having them attached to a frame
rep = data.to_cartesian()
diffs = {
k: diff.represent_as(CartesianDifferential, data)
for k, diff in data.differentials.items()
}
rep = rep.with_differentials(diffs)
# Only do transform if matrix is specified. This is for speed in
# transformations that only specify an offset (e.g., LSR)
if matrix is not None:
# Note: this applies to both representation and differentials
rep = rep.transform(matrix)
# TODO: if we decide to allow arithmetic between representations that
# contain differentials, this can be tidied up
if offset is not None:
newrep = rep.without_differentials() + offset.without_differentials()
else:
newrep = rep.without_differentials()
# We need a velocity (time derivative) and, for now, are strict: the
# representation can only contain a velocity differential and no others.
if has_velocity and not rad_vel_diff:
veldiff = rep.differentials["s"] # already in Cartesian form
if offset is not None and "s" in offset.differentials:
veldiff = veldiff + offset.differentials["s"]
newrep = newrep.with_differentials({"s": veldiff})
if isinstance(fromcoord.data, UnitSphericalRepresentation):
# Special-case this because otherwise the return object will think
# it has a valid distance with the default return (a
# CartesianRepresentation instance)
if has_velocity and not unit_vel_diff and not rad_vel_diff:
# We have to first represent as the Unit types we converted to,
# then put the d_distance information back in to the
# differentials and re-represent as their original forms
newdiff = newrep.differentials["s"]
_unit_cls = fromcoord.data.differentials["s"]._unit_differential
newdiff = newdiff.represent_as(_unit_cls, newrep)
kwargs = {comp: getattr(newdiff, comp) for comp in newdiff.components}
kwargs["d_distance"] = fromcoord.data.differentials["s"].d_distance
diffs = {
"s": fromcoord.data.differentials["s"].__class__(
copy=False, **kwargs
)
}
elif has_velocity and unit_vel_diff:
newdiff = newrep.differentials["s"].represent_as(
fromcoord.data.differentials["s"].__class__, newrep
)
diffs = {"s": newdiff}
else:
diffs = newrep.differentials
newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs
newrep = newrep.with_differentials(diffs)
elif has_velocity and unit_vel_diff:
# Here, we're in the case where the representation is not
# UnitSpherical, but the differential *is* one of the UnitSpherical
# types. We have to convert back to that differential class or the
# resulting frame will think it has a valid radial_velocity. This
# can probably be cleaned up: we currently have to go through the
# dimensional version of the differential before representing as the
# unit differential so that the units work out (the distance length
# unit shouldn't appear in the resulting proper motions)
diff_cls = fromcoord.data.differentials["s"].__class__
newrep = newrep.represent_as(
fromcoord.data.__class__, diff_cls._dimensional_differential
)
newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)
# We pulled the radial differential off of the representation
# earlier, so now we need to put it back. But, in order to do that, we
# have to turn the representation into a repr that is compatible with
# having a RadialDifferential
if has_velocity and rad_vel_diff:
newrep = newrep.represent_as(fromcoord.data.__class__)
newrep = newrep.with_differentials({"s": fromcoord.data.differentials["s"]})
return newrep
def __call__(self, fromcoord, toframe):
params = self._affine_params(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, *params)
return toframe.realize_frame(newrep)
@abstractmethod
def _affine_params(self, fromcoord, toframe):
pass
class AffineTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a 3 x 3
cartesian transformation matrix and a tuple of displacement vectors.
See `~astropy.coordinates.builtin_frames.galactocentric.Galactocentric` for
an example.
Parameters
----------
transform_func : callable
A callable that has the signature ``transform_func(fromcoord, toframe)``
and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
Cartesian representation, and a ``CartesianRepresentation`` with
(optionally) an attached velocity ``CartesianDifferential`` to represent
a translation and offset in velocity to apply after the matrix
operation.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``transform_func`` is not callable
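    Notes
    -----
    A minimal sketch of registering such a transform (``FromFrame`` and
    ``ToFrame`` are hypothetical frame classes; the matrix and offset values
    are purely illustrative)::

        import numpy as np
        import astropy.units as u
        from astropy.coordinates import (AffineTransform, CartesianRepresentation,
                                         frame_transform_graph)

        @frame_transform_graph.transform(AffineTransform, FromFrame, ToFrame)
        def fromframe_to_toframe(fromcoord, toframe):
            matrix = np.eye(3)  # no rotation in this sketch
            offset = CartesianRepresentation([1, 0, 0] * u.kpc)  # pure translation
            return matrix, offset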
"""
def __init__(self, transform_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(transform_func):
raise TypeError("transform_func is not callable")
self.transform_func = transform_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.transform_func(fromcoord, toframe)
class StaticMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
A 3 x 3 matrix for transforming 3-vectors. In most cases will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
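    Notes
    -----
    A minimal sketch (``FrameA`` and ``FrameB`` are hypothetical frame classes;
    the rotation angle is purely illustrative)::

        import astropy.units as u
        from astropy.coordinates import StaticMatrixTransform, frame_transform_graph
        from astropy.coordinates.matrix_utilities import rotation_matrix

        @frame_transform_graph.transform(StaticMatrixTransform, FrameA, FrameB)
        def framea_to_frameb():
            # called with no arguments; must return the fixed 3 x 3 matrix
            return rotation_matrix(30 * u.deg, axis="z")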
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError("Provided matrix is not 3 x 3")
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix, None
class DynamicMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
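    Notes
    -----
    A minimal sketch (``FrameA`` and ``FrameB`` are hypothetical frame classes,
    and ``rotation_angle`` is a hypothetical frame attribute; the point is that
    the matrix may depend on attributes of either frame)::

        from astropy.coordinates import DynamicMatrixTransform, frame_transform_graph
        from astropy.coordinates.matrix_utilities import rotation_matrix

        @frame_transform_graph.transform(DynamicMatrixTransform, FrameA, FrameB)
        def framea_to_frameb(fromcoord, toframe):
            # unlike StaticMatrixTransform, the matrix is recomputed per call
            return rotation_matrix(toframe.rotation_angle, axis="z")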
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(matrix_func):
raise TypeError("matrix_func is not callable")
self.matrix_func = matrix_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix_func(fromcoord, toframe), None
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
    transforms : sequence of `CoordinateTransform` objects
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
        If `True`, consecutive `StaticMatrixTransform` objects will be collapsed
        into a single transformation to speed up the calculation.
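    Notes
    -----
    Composite transforms are normally built by the transform graph rather than
    constructed by hand. For illustration, with two built-in frames::

        from astropy.coordinates import ICRS, Galactic, frame_transform_graph

        # get_transform chains the registered per-step transforms into a
        # single CompositeTransform
        trans = frame_transform_graph.get_transform(ICRS, Galactic)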
"""
def __init__(
self,
transforms,
fromsys,
tosys,
priority=1,
register_graph=None,
collapse_static_mats=True,
):
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
Combines together sequences of `StaticMatrixTransform`s into a single
transform and returns it.
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if isinstance(lasttrans, StaticMatrixTransform) and isinstance(
currtrans, StaticMatrixTransform
):
newtrans[-1] = StaticMatrixTransform(
currtrans.matrix @ lasttrans.matrix,
lasttrans.fromsys,
currtrans.tosys,
)
else:
newtrans.append(currtrans)
return newtrans
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
# build an intermediate frame with attributes taken from either
# `toframe`, or if not there, `fromcoord`, or if not there, use
# the defaults
# TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.frame_attributes:
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
# this is safe even in the case where self.transforms is empty, because
# coordinate objects are immutable, so copying is not needed
return curr_coord
def _as_single_transform(self):
"""
Return an encapsulated version of the composite transform so that it appears to
be a single transform.
The returned transform internally calls the constituent transforms. If all of
the transforms are affine, the merged transform is
`~astropy.coordinates.transformations.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.transformations.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`.
"""
# Create a list of the transforms including flattening any constituent CompositeTransform
transforms = [
t if not isinstance(t, CompositeTransform) else t._as_single_transform()
for t in self.transforms
]
        if all(isinstance(t, BaseAffineTransform) for t in transforms):
# Check if there may be an origin shift
fixed_origin = all(
isinstance(t, (StaticMatrixTransform, DynamicMatrixTransform))
for t in transforms
)
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return None if fixed_origin else (None, None)
# Create a merged attribute dictionary for any intermediate frames
# For any attributes shared by the "from"/"to" frames, the "to" frame takes
# precedence because this is the same choice implemented in __call__()
merged_attr = {
name: getattr(from_coo, name) for name in from_coo.frame_attributes
}
merged_attr.update(
{
name: getattr(to_frame, name)
for name in to_frame.frame_attributes
}
)
affine_params = (None, None)
# Step through each transform step (frame A -> frame B)
for i, t in enumerate(transforms):
# Extract the relevant attributes for frame A
if i == 0:
# If frame A is actually the initial frame, preserve its attributes
a_attr = {
name: getattr(from_coo, name)
for name in from_coo.frame_attributes
}
else:
a_attr = {
k: v
for k, v in merged_attr.items()
if k in t.fromsys.frame_attributes
}
# Extract the relevant attributes for frame B
b_attr = {
k: v
for k, v in merged_attr.items()
if k in t.tosys.frame_attributes
}
# Obtain the affine parameters for the transform
# Note that we insert some dummy data into frame A because the transformation
# machinery requires there to be data present. Removing that limitation
# is a possible TODO, but some care would need to be taken because some affine
# transforms have branching code depending on the presence of differentials.
next_affine_params = t._affine_params(
t.fromsys(from_coo.data, **a_attr), t.tosys(**b_attr)
)
# Combine the affine parameters with the running set
affine_params = _combine_affine_params(
affine_params, next_affine_params
)
# If there is no origin shift, return only the matrix
return affine_params[0] if fixed_origin else affine_params
# The return type depends on whether there is any origin shift
transform_type = DynamicMatrixTransform if fixed_origin else AffineTransform
else:
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return to_frame.realize_frame(from_coo.data)
return self(from_coo, to_frame)
transform_type = FunctionTransformWithFiniteDifference
return transform_type(
single_transform, self.fromsys, self.tosys, priority=self.priority
)
def _combine_affine_params(params, next_params):
"""
Combine two sets of affine parameters.
The parameters for an affine transformation are a 3 x 3 Cartesian
transformation matrix and a displacement vector, which can include an
attached velocity. Either type of parameter can be ``None``.
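    In other words, composing ``x -> M @ x + vec`` with a subsequent
    ``x -> next_M @ x + next_vec`` gives
    ``x -> (next_M @ M) @ x + (next_M @ vec + next_vec)``; the returned pair is
    that combined matrix and vector, with ``None`` standing in for "no rotation"
    or "no shift".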
"""
M, vec = params
next_M, next_vec = next_params
# Multiply the transformation matrices if they both exist
if M is not None and next_M is not None:
new_M = next_M @ M
else:
new_M = M if M is not None else next_M
if vec is not None:
# Transform the first displacement vector by the second transformation matrix
if next_M is not None:
vec = vec.transform(next_M)
# Calculate the new displacement vector
if next_vec is not None:
if "s" in vec.differentials and "s" in next_vec.differentials:
# Adding vectors with velocities takes more steps
# TODO: Add support in representation.py
new_vec_velocity = vec.differentials["s"] + next_vec.differentials["s"]
new_vec = vec.without_differentials() + next_vec.without_differentials()
new_vec = new_vec.with_differentials({"s": new_vec_velocity})
else:
new_vec = vec + next_vec
else:
new_vec = vec
else:
new_vec = next_vec
return new_M, new_vec
# map class names to colorblind-safe colors
trans_to_color = {}
trans_to_color[AffineTransform] = "#555555" # gray
trans_to_color[FunctionTransform] = "#783001" # dark red-ish/brown
trans_to_color[FunctionTransformWithFiniteDifference] = "#d95f02" # red-ish
trans_to_color[StaticMatrixTransform] = "#7570b3" # blue-ish
trans_to_color[DynamicMatrixTransform] = "#1b9e77" # green-ish
56320420f981e2052df1b31eee13c26e882a659d413c74cc1ae1b44dfa96e3e2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Framework and base classes for coordinate frames/"low-level" coordinate
classes.
"""
# Standard library
import copy
import inspect
import warnings
from collections import defaultdict, namedtuple
# Dependencies
import numpy as np
from astropy import units as u
from astropy.utils import ShapedLikeNDArray, check_broadcast
# Project
from astropy.utils.decorators import deprecated, format_doc, lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from . import representation as r
from .angles import Angle
from .attributes import Attribute
from .transformations import TransformGraph
__all__ = [
"BaseCoordinateFrame",
"frame_transform_graph",
"GenericFrame",
"RepresentationMapping",
]
# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()
def _get_repr_cls(value):
"""
Return a valid representation class from ``value`` or raise exception.
"""
if value in r.REPRESENTATION_CLASSES:
value = r.REPRESENTATION_CLASSES[value]
elif not isinstance(value, type) or not issubclass(value, r.BaseRepresentation):
raise ValueError(
f"Representation is {value!r} but must be a BaseRepresentation class "
f"or one of the string aliases {list(r.REPRESENTATION_CLASSES)}"
)
return value
def _get_diff_cls(value):
"""
Return a valid differential class from ``value`` or raise exception.
As originally created, this is only used in the SkyCoord initializer, so if
    that is refactored, this function may no longer be necessary.
"""
if value in r.DIFFERENTIAL_CLASSES:
value = r.DIFFERENTIAL_CLASSES[value]
elif not isinstance(value, type) or not issubclass(value, r.BaseDifferential):
raise ValueError(
f"Differential is {value!r} but must be a BaseDifferential class "
f"or one of the string aliases {list(r.DIFFERENTIAL_CLASSES)}"
)
return value
def _get_repr_classes(base, **differentials):
"""Get valid representation and differential classes.
Parameters
----------
base : str or `~astropy.coordinates.BaseRepresentation` subclass
class for the representation of the base coordinates. If a string,
it is looked up among the known representation classes.
    **differentials : dict of str or `~astropy.coordinates.BaseDifferential`
Keys are like for normal differentials, i.e., 's' for a first
derivative in time, etc. If an item is set to `None`, it will be
guessed from the base class.
Returns
-------
repr_classes : dict of subclasses
The base class is keyed by 'base'; the others by the keys of
        ``differentials``.
"""
base = _get_repr_cls(base)
repr_classes = {"base": base}
for name, differential_type in differentials.items():
if differential_type == "base":
# We don't want to fail for this case.
differential_type = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None)
elif differential_type in r.DIFFERENTIAL_CLASSES:
differential_type = r.DIFFERENTIAL_CLASSES[differential_type]
elif differential_type is not None and (
not isinstance(differential_type, type)
or not issubclass(differential_type, r.BaseDifferential)
):
            raise ValueError(
                f"Differential is {differential_type!r} but must be a BaseDifferential"
f" class or one of the string aliases {list(r.DIFFERENTIAL_CLASSES)}"
)
repr_classes[name] = differential_type
return repr_classes
_RepresentationMappingBase = namedtuple(
"RepresentationMapping", ("reprname", "framename", "defaultunit")
)
class RepresentationMapping(_RepresentationMappingBase):
"""
This `~collections.namedtuple` is used with the
``frame_specific_representation_info`` attribute to tell frames what
attribute names (and default units) to use for a particular representation.
``reprname`` and ``framename`` should be strings, while ``defaultunit`` can
be either an astropy unit, the string ``'recommended'`` (which is degrees
for Angles, nothing otherwise), or None (to indicate that no unit mapping
should be done).
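    For example, an equatorial frame can relabel the spherical ``lon``/``lat``
    components as ``ra``/``dec`` (a sketch of the pattern used by the built-in
    frames, where ``r`` is ``astropy.coordinates.representation``)::

        frame_specific_representation_info = {
            r.SphericalRepresentation: [
                RepresentationMapping("lon", "ra"),
                RepresentationMapping("lat", "dec"),
            ]
        }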
"""
def __new__(cls, reprname, framename, defaultunit="recommended"):
# this trick just provides some defaults
return super().__new__(cls, reprname, framename, defaultunit)
base_doc = """{__doc__}
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` subclass instance
A representation object or ``None`` to have no data (or use the
coordinate component arguments, see below).
{components}
representation_type : `~astropy.coordinates.BaseRepresentation` subclass, str, optional
A representation class or string name of a representation class. This
sets the expected input representation class, thereby changing the
expected keyword arguments for the data passed in. For example, passing
``representation_type='cartesian'`` will make the classes expect
position data with cartesian names, i.e. ``x, y, z`` in most cases
unless overridden via ``frame_specific_representation_info``. To see this
frame's names, check out ``<this frame>().representation_info``.
differential_type : `~astropy.coordinates.BaseDifferential` subclass, str, dict, optional
A differential class or dictionary of differential classes (currently
only a velocity differential with key 's' is supported). This sets the
expected input differential class, thereby changing the expected keyword
arguments of the data passed in. For example, passing
``differential_type='cartesian'`` will make the classes expect velocity
data with the argument names ``v_x, v_y, v_z`` unless overridden via
``frame_specific_representation_info``. To see this frame's names,
check out ``<this frame>().representation_info``.
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
{footer}
"""
_components = """
*args, **kwargs
Coordinate components, with names that depend on the subclass.
"""
@format_doc(base_doc, components=_components, footer="")
class BaseCoordinateFrame(ShapedLikeNDArray):
"""
The base class for coordinate frames.
This class is intended to be subclassed to create instances of specific
systems. Subclasses can implement the following attributes:
* `default_representation`
A subclass of `~astropy.coordinates.BaseRepresentation` that will be
treated as the default representation of this frame. This is the
representation assumed by default when the frame is created.
* `default_differential`
A subclass of `~astropy.coordinates.BaseDifferential` that will be
treated as the default differential class of this frame. This is the
differential class assumed by default when the frame is created.
* `~astropy.coordinates.Attribute` class attributes
Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined
using a descriptor class. See the narrative documentation or
built-in classes code for details.
* `frame_specific_representation_info`
A dictionary mapping the name or class of a representation to a list of
`~astropy.coordinates.RepresentationMapping` objects that tell what
names and default units should be used on this frame for the components
of that representation.
Unless overridden via `frame_specific_representation_info`, velocity name
defaults are:
* ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for `SphericalCosLatDifferential`
proper motion components
* ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper motion
components
* ``radial_velocity`` for any ``d_distance`` component
* ``v_{x,y,z}`` for `CartesianDifferential` velocity components
where ``{lon}`` and ``{lat}`` are the frame names of the angular components.
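    A minimal sketch of a custom frame subclass (the class name and the
    ``obstime`` default are illustrative)::

        from astropy.coordinates import (BaseCoordinateFrame, TimeAttribute,
                                         SphericalRepresentation,
                                         SphericalCosLatDifferential)
        from astropy.time import Time

        class MyFrame(BaseCoordinateFrame):
            default_representation = SphericalRepresentation
            default_differential = SphericalCosLatDifferential
            obstime = TimeAttribute(default=Time("J2000"))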
"""
default_representation = None
default_differential = None
# Specifies special names and units for representation and differential
# attributes.
frame_specific_representation_info = {}
frame_attributes = {}
# Default empty frame_attributes dict
def __init_subclass__(cls, **kwargs):
# We first check for explicitly set values for these:
default_repr = getattr(cls, "default_representation", None)
default_diff = getattr(cls, "default_differential", None)
repr_info = getattr(cls, "frame_specific_representation_info", None)
# Then, to make sure this works for subclasses-of-subclasses, we also
# have to check for cases where the attribute names have already been
# replaced by underscore-prefaced equivalents by the logic below:
if default_repr is None or isinstance(default_repr, property):
default_repr = getattr(cls, "_default_representation", None)
if default_diff is None or isinstance(default_diff, property):
default_diff = getattr(cls, "_default_differential", None)
if repr_info is None or isinstance(repr_info, property):
repr_info = getattr(cls, "_frame_specific_representation_info", None)
repr_info = cls._infer_repr_info(repr_info)
# Make read-only properties for the frame class attributes that should
# be read-only to make them immutable after creation.
# We copy attributes instead of linking to make sure there's no
# accidental cross-talk between classes
cls._create_readonly_property(
"default_representation",
default_repr,
"Default representation for position data",
)
cls._create_readonly_property(
"default_differential",
default_diff,
"Default representation for differential data (e.g., velocity)",
)
cls._create_readonly_property(
"frame_specific_representation_info",
copy.deepcopy(repr_info),
"Mapping for frame-specific component names",
)
# Set the frame attributes. We first construct the attributes from
# superclasses, going in reverse order to keep insertion order,
# and then add any attributes from the frame now being defined
# (if any old definitions are overridden, this keeps the order).
# Note that we cannot simply start with the inherited frame_attributes
# since we could be a mixin between multiple coordinate frames.
# TODO: Should this be made to use readonly_prop_factory as well or
# would it be inconvenient for getting the frame_attributes from
# classes?
frame_attrs = {}
for basecls in reversed(cls.__bases__):
if issubclass(basecls, BaseCoordinateFrame):
frame_attrs.update(basecls.frame_attributes)
for k, v in cls.__dict__.items():
if isinstance(v, Attribute):
frame_attrs[k] = v
cls.frame_attributes = frame_attrs
# Deal with setting the name of the frame:
if not hasattr(cls, "name"):
cls.name = cls.__name__.lower()
elif BaseCoordinateFrame not in cls.__bases__ and cls.name in [
getattr(base, "name", None) for base in cls.__bases__
]:
# This may be a subclass of a subclass of BaseCoordinateFrame,
# like ICRS(BaseRADecFrame). In this case, cls.name will have been
# set by init_subclass
cls.name = cls.__name__.lower()
# A cache that *must be unique to each frame class* - it is
# insufficient to share them with superclasses, hence the need to put
# them in the meta
cls._frame_class_cache = {}
super().__init_subclass__(**kwargs)
# call this once here to initialize defaults
# (via FrameAttribute.__get__/convert_input)
cls.get_frame_attr_defaults()
def __init__(
self,
*args,
copy=True,
representation_type=None,
differential_type=None,
**kwargs,
):
self._attr_names_with_defaults = []
self._representation = self._infer_representation(
representation_type, differential_type
)
self._data = self._infer_data(args, copy, kwargs) # possibly None.
# Set frame attributes, if any
values = {}
for fnm, fdefault in self.get_frame_attr_defaults().items():
# Read-only frame attributes are defined as FrameAttribute
# descriptors which are not settable, so set 'real' attributes as
# the name prefaced with an underscore.
if fnm in kwargs:
value = kwargs.pop(fnm)
setattr(self, "_" + fnm, value)
# Validate attribute by getting it. If the instance has data,
# this also checks its shape is OK. If not, we do it below.
values[fnm] = getattr(self, fnm)
else:
setattr(self, "_" + fnm, fdefault)
self._attr_names_with_defaults.append(fnm)
if kwargs:
raise TypeError(
f"Coordinate frame {self.__class__.__name__} got unexpected "
f"keywords: {list(kwargs)}"
)
# We do ``is None`` because self._data might evaluate to false for
# empty arrays or data == 0
if self._data is None:
# No data: we still need to check that any non-scalar attributes
# have consistent shapes. Collect them for all attributes with
# size > 1 (which should be array-like and thus have a shape).
shapes = {
fnm: value.shape
for fnm, value in values.items()
if getattr(value, "shape", ())
}
if shapes:
if len(shapes) > 1:
try:
self._no_data_shape = check_broadcast(*shapes.values())
except ValueError as err:
raise ValueError(
f"non-scalar attributes with inconsistent shapes: {shapes}"
) from err
# Above, we checked that it is possible to broadcast all
# shapes. By getting and thus validating the attributes,
# we verify that the attributes can in fact be broadcast.
for fnm in shapes:
getattr(self, fnm)
else:
self._no_data_shape = shapes.popitem()[1]
else:
self._no_data_shape = ()
# The logic of this block is not related to the previous one
if self._data is not None:
# This makes the cache keys backwards-compatible, but also adds
# support for having differentials attached to the frame data
# representation object.
if "s" in self._data.differentials:
# TODO: assumes a velocity unit differential
key = (
self._data.__class__.__name__,
self._data.differentials["s"].__class__.__name__,
False,
)
else:
key = (self._data.__class__.__name__, False)
# Set up representation cache.
self.cache["representation"][key] = self._data
def _infer_representation(self, representation_type, differential_type):
if representation_type is None and differential_type is None:
return {"base": self.default_representation, "s": self.default_differential}
if representation_type is None:
representation_type = self.default_representation
if inspect.isclass(differential_type) and issubclass(
differential_type, r.BaseDifferential
):
# TODO: assumes the differential class is for the velocity
# differential
differential_type = {"s": differential_type}
elif isinstance(differential_type, str):
# TODO: assumes the differential class is for the velocity
# differential
diff_cls = r.DIFFERENTIAL_CLASSES[differential_type]
differential_type = {"s": diff_cls}
elif differential_type is None:
if representation_type == self.default_representation:
differential_type = {"s": self.default_differential}
else:
differential_type = {"s": "base"} # see set_representation_cls()
return _get_repr_classes(representation_type, **differential_type)
def _infer_data(self, args, copy, kwargs):
# if not set below, this is a frame with no data
representation_data = None
differential_data = None
args = list(args) # need to be able to pop them
if args and (isinstance(args[0], r.BaseRepresentation) or args[0] is None):
representation_data = args.pop(0) # This can still be None
if len(args) > 0:
raise TypeError(
"Cannot create a frame with both a representation object "
"and other positional arguments"
)
if representation_data is not None:
diffs = representation_data.differentials
differential_data = diffs.get("s", None)
if (differential_data is None and len(diffs) > 0) or (
differential_data is not None and len(diffs) > 1
):
raise ValueError(
"Multiple differentials are associated with the representation"
" object passed in to the frame initializer. Only a single"
f" velocity differential is supported. Got: {diffs}"
)
else:
representation_cls = self.get_representation_cls()
# Get any representation data passed in to the frame initializer
# using keyword or positional arguments for the component names
repr_kwargs = {}
for nmkw, nmrep in self.representation_component_names.items():
if len(args) > 0:
# first gather up positional args
repr_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
repr_kwargs[nmrep] = kwargs.pop(nmkw)
# special-case the Spherical->UnitSpherical if no `distance`
if repr_kwargs:
# TODO: determine how to get rid of the part before the "try" -
# currently removing it has a performance regression for
# unitspherical because of the try-related overhead.
# Also frames have no way to indicate what the "distance" is
if repr_kwargs.get("distance", True) is None:
del repr_kwargs["distance"]
if (
issubclass(representation_cls, r.SphericalRepresentation)
and "distance" not in repr_kwargs
):
representation_cls = representation_cls._unit_representation
try:
representation_data = representation_cls(copy=copy, **repr_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
try:
representation_data = representation_cls._unit_representation(
copy=copy, **repr_kwargs
)
except Exception:
msg = str(e)
names = self.get_representation_component_names()
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace("__init__()", f"{self.__class__.__name__}()")
e.args = (msg,)
raise e
# Now we handle the Differential data:
# Get any differential data passed in to the frame initializer
# using keyword or positional arguments for the component names
differential_cls = self.get_representation_cls("s")
diff_component_names = self.get_representation_component_names("s")
diff_kwargs = {}
for nmkw, nmrep in diff_component_names.items():
if len(args) > 0:
# first gather up positional args
diff_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
diff_kwargs[nmrep] = kwargs.pop(nmkw)
if diff_kwargs:
if (
hasattr(differential_cls, "_unit_differential")
and "d_distance" not in diff_kwargs
):
differential_cls = differential_cls._unit_differential
elif len(diff_kwargs) == 1 and "d_distance" in diff_kwargs:
differential_cls = r.RadialDifferential
try:
differential_data = differential_cls(copy=copy, **diff_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
msg = str(e)
names = self.get_representation_component_names("s")
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace("__init__()", f"{self.__class__.__name__}()")
e.args = (msg,)
raise
if len(args) > 0:
raise TypeError(
"{}.__init__ had {} remaining unhandled arguments".format(
self.__class__.__name__, len(args)
)
)
if representation_data is None and differential_data is not None:
raise ValueError(
"Cannot pass in differential component data "
"without positional (representation) data."
)
if differential_data:
# Check that differential data provided has units compatible
# with time-derivative of representation data.
# NOTE: there is no dimensionless time while lengths can be
# dimensionless (u.dimensionless_unscaled).
for comp in representation_data.components:
if (diff_comp := f"d_{comp}") in differential_data.components:
current_repr_unit = representation_data._units[comp]
current_diff_unit = differential_data._units[diff_comp]
expected_unit = current_repr_unit / u.s
if not current_diff_unit.is_equivalent(expected_unit):
for (
key,
val,
) in self.get_representation_component_names().items():
if val == comp:
current_repr_name = key
break
for key, val in self.get_representation_component_names(
"s"
).items():
if val == diff_comp:
current_diff_name = key
break
raise ValueError(
f'{current_repr_name} has unit "{current_repr_unit}" with'
f' physical type "{current_repr_unit.physical_type}", but'
f" {current_diff_name} has incompatible unit"
f' "{current_diff_unit}" with physical type'
f' "{current_diff_unit.physical_type}" instead of the'
f' expected "{(expected_unit).physical_type}".'
)
representation_data = representation_data.with_differentials(
{"s": differential_data}
)
return representation_data
@classmethod
def _infer_repr_info(cls, repr_info):
# Unless overridden via `frame_specific_representation_info`, velocity
# name defaults are (see also docstring for BaseCoordinateFrame):
# * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for
# `SphericalCosLatDifferential` proper motion components
# * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper
# motion components
# * ``radial_velocity`` for any `d_distance` component
# * ``v_{x,y,z}`` for `CartesianDifferential` velocity components
# where `{lon}` and `{lat}` are the frame names of the angular
# components.
if repr_info is None:
repr_info = {}
# the tuple() call below is necessary because if it is not there,
# the iteration proceeds in a difficult-to-predict manner in the
        # case that one of the class objects' hashes is such that it gets
# revisited by the iteration. The tuple() call prevents this by
# making the items iterated over fixed regardless of how the dict
# changes
for cls_or_name in tuple(repr_info.keys()):
if isinstance(cls_or_name, str):
# TODO: this provides a layer of backwards compatibility in
# case the key is a string, but now we want explicit classes.
_cls = _get_repr_cls(cls_or_name)
repr_info[_cls] = repr_info.pop(cls_or_name)
# The default spherical names are 'lon' and 'lat'
repr_info.setdefault(
r.SphericalRepresentation,
[RepresentationMapping("lon", "lon"), RepresentationMapping("lat", "lat")],
)
sph_component_map = {
m.reprname: m.framename for m in repr_info[r.SphericalRepresentation]
}
repr_info.setdefault(
r.SphericalCosLatDifferential,
[
RepresentationMapping(
"d_lon_coslat",
"pm_{lon}_cos{lat}".format(**sph_component_map),
u.mas / u.yr,
),
RepresentationMapping(
"d_lat", "pm_{lat}".format(**sph_component_map), u.mas / u.yr
),
RepresentationMapping("d_distance", "radial_velocity", u.km / u.s),
],
)
repr_info.setdefault(
r.SphericalDifferential,
[
RepresentationMapping(
"d_lon", "pm_{lon}".format(**sph_component_map), u.mas / u.yr
),
RepresentationMapping(
"d_lat", "pm_{lat}".format(**sph_component_map), u.mas / u.yr
),
RepresentationMapping("d_distance", "radial_velocity", u.km / u.s),
],
)
repr_info.setdefault(
r.CartesianDifferential,
[
RepresentationMapping("d_x", "v_x", u.km / u.s),
RepresentationMapping("d_y", "v_y", u.km / u.s),
RepresentationMapping("d_z", "v_z", u.km / u.s),
],
)
# Unit* classes should follow the same naming conventions
# TODO: this adds some unnecessary mappings for the Unit classes, so
# this could be cleaned up, but in practice doesn't seem to have any
# negative side effects
repr_info.setdefault(
r.UnitSphericalRepresentation, repr_info[r.SphericalRepresentation]
)
repr_info.setdefault(
r.UnitSphericalCosLatDifferential, repr_info[r.SphericalCosLatDifferential]
)
repr_info.setdefault(
r.UnitSphericalDifferential, repr_info[r.SphericalDifferential]
)
return repr_info
@classmethod
def _create_readonly_property(cls, attr_name, value, doc=None):
private_attr = "_" + attr_name
def getter(self):
return getattr(self, private_attr)
setattr(cls, private_attr, value)
setattr(cls, attr_name, property(getter, doc=doc))
@lazyproperty
def cache(self):
"""
Cache for this frame, a dict. It stores anything that should be
computed from the coordinate data (*not* from the frame attributes).
This can be used in functions to store anything that might be
expensive to compute but might be re-used by some other function.
E.g.::
if 'user_data' in myframe.cache:
data = myframe.cache['user_data']
else:
myframe.cache['user_data'] = data = expensive_func(myframe.lat)
If in-place modifications are made to the frame data, the cache should
be cleared::
myframe.cache.clear()
"""
return defaultdict(dict)
@property
def data(self):
"""
        The coordinate data for this object. If this frame has no data, a
`ValueError` will be raised. Use `has_data` to
check if data is present on this frame object.
"""
if self._data is None:
raise ValueError(
f'The frame object "{self!r}" does not have associated data'
)
return self._data
@property
def has_data(self):
"""
True if this frame has `data`, False otherwise.
"""
return self._data is not None
@property
def shape(self):
return self.data.shape if self.has_data else self._no_data_shape
# We have to override the ShapedLikeNDArray definitions, since our shape
# does not have to be that of the data.
def __len__(self):
return len(self.data)
def __bool__(self):
return self.has_data and self.size > 0
@property
def size(self):
return self.data.size
@property
def isscalar(self):
return self.has_data and self.data.isscalar
@classmethod
def get_frame_attr_defaults(cls):
"""Return a dict with the defaults for each frame attribute"""
return {name: getattr(cls, name) for name in cls.frame_attributes}
@deprecated(
"5.2",
alternative="get_frame_attr_defaults",
message=(
"The {func}() {obj_type} is deprecated and may be removed in a future"
" version. Use {alternative}() to obtain a dict of frame attribute names"
" and default values."
" The fastest way to obtain the names is frame_attributes.keys()"
),
)
@classmethod
def get_frame_attr_names(cls):
"""Return a dict with the defaults for each frame attribute"""
return cls.get_frame_attr_defaults()
def get_representation_cls(self, which="base"):
"""The class used for part of this frame's data.
Parameters
----------
which : ('base', 's', `None`)
The class of which part to return. 'base' means the class used to
represent the coordinates; 's' the first derivative to time, i.e.,
the class representing the proper motion and/or radial velocity.
If `None`, return a dict with both.
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`.
"""
if which is not None:
return self._representation[which]
else:
return self._representation
def set_representation_cls(self, base=None, s="base"):
"""Set representation and/or differential class for this frame's data.
Parameters
----------
base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional
The name or subclass to use to represent the coordinate data.
s : `~astropy.coordinates.BaseDifferential` subclass, optional
The differential subclass to use to represent any velocities,
such as proper motion and radial velocity. If equal to 'base',
which is the default, it will be inferred from the representation.
If `None`, the representation will drop any differentials.
"""
if base is None:
base = self._representation["base"]
self._representation = _get_repr_classes(base=base, s=s)
representation_type = property(
fget=get_representation_cls,
fset=set_representation_cls,
doc="""The representation class used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseRepresentation`.
Can also be *set* using the string name of the representation. If you
wish to set an explicit differential class (rather than have it be
inferred), use the ``set_representation_cls`` method.
""",
)
@property
def differential_type(self):
"""
The differential used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseDifferential`.
For simultaneous setting of representation and differentials, see the
``set_representation_cls`` method.
"""
return self.get_representation_cls("s")
@differential_type.setter
def differential_type(self, value):
self.set_representation_cls(s=value)
@classmethod
def _get_representation_info(cls):
# This exists as a class method only to support handling frame inputs
# without units, which are deprecated and will be removed. This can be
# moved into the representation_info property at that time.
        # note that if so moved, the cache should be accessed as
# self.__class__._frame_class_cache
if (
cls._frame_class_cache.get("last_reprdiff_hash", None)
!= r.get_reprdiff_cls_hash()
):
repr_attrs = {}
for repr_diff_cls in list(r.REPRESENTATION_CLASSES.values()) + list(
r.DIFFERENTIAL_CLASSES.values()
):
repr_attrs[repr_diff_cls] = {"names": [], "units": []}
for c, c_cls in repr_diff_cls.attr_classes.items():
repr_attrs[repr_diff_cls]["names"].append(c)
rec_unit = u.deg if issubclass(c_cls, Angle) else None
repr_attrs[repr_diff_cls]["units"].append(rec_unit)
for (
repr_diff_cls,
mappings,
) in cls._frame_specific_representation_info.items():
# take the 'names' and 'units' tuples from repr_attrs,
# and then use the RepresentationMapping objects
# to update as needed for this frame.
nms = repr_attrs[repr_diff_cls]["names"]
uns = repr_attrs[repr_diff_cls]["units"]
comptomap = {m.reprname: m for m in mappings}
for i, c in enumerate(repr_diff_cls.attr_classes.keys()):
if c in comptomap:
mapp = comptomap[c]
nms[i] = mapp.framename
# need the isinstance because otherwise if it's a unit it
# will try to compare to the unit string representation
if not (
isinstance(mapp.defaultunit, str)
and mapp.defaultunit == "recommended"
):
uns[i] = mapp.defaultunit
# else we just leave it as recommended_units says above
# Convert to tuples so that this can't mess with frame internals
repr_attrs[repr_diff_cls]["names"] = tuple(nms)
repr_attrs[repr_diff_cls]["units"] = tuple(uns)
cls._frame_class_cache["representation_info"] = repr_attrs
cls._frame_class_cache["last_reprdiff_hash"] = r.get_reprdiff_cls_hash()
return cls._frame_class_cache["representation_info"]
@lazyproperty
def representation_info(self):
"""
A dictionary with the information of what attribute names for this frame
apply to particular representations.
"""
return self._get_representation_info()
def get_representation_component_names(self, which="base"):
out = {}
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
data_names = repr_or_diff_cls.attr_classes.keys()
repr_names = self.representation_info[repr_or_diff_cls]["names"]
for repr_name, data_name in zip(repr_names, data_names):
out[repr_name] = data_name
return out
def get_representation_component_units(self, which="base"):
out = {}
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
repr_attrs = self.representation_info[repr_or_diff_cls]
repr_names = repr_attrs["names"]
repr_units = repr_attrs["units"]
for repr_name, repr_unit in zip(repr_names, repr_units):
if repr_unit:
out[repr_name] = repr_unit
return out
representation_component_names = property(get_representation_component_names)
representation_component_units = property(get_representation_component_units)
def _replicate(self, data, copy=False, **kwargs):
"""Base for replicating a frame, with possibly different attributes.
Produces a new instance of the frame using the attributes of the old
frame (unless overridden) and with the data given.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` or None
Data to use in the new frame instance. If `None`, it will be
a data-less frame.
copy : bool, optional
Whether data and the attributes on the old frame should be copied
(default), or passed on by reference.
**kwargs
Any attributes that should be overridden.
"""
# This is to provide a slightly nicer error message if the user tries
# to use frame_obj.representation instead of frame_obj.data to get the
# underlying representation object [e.g., #2890]
if inspect.isclass(data):
raise TypeError(
"Class passed as data instead of a representation instance. If you"
" called frame.representation, this returns the representation class."
" frame.data returns the instantiated object - you may want to use"
" this instead."
)
if copy and data is not None:
data = data.copy()
for attr in self.frame_attributes:
if attr not in self._attr_names_with_defaults and attr not in kwargs:
value = getattr(self, attr)
if copy:
value = value.copy()
kwargs[attr] = value
return self.__class__(data, copy=False, **kwargs)
def replicate(self, copy=False, **kwargs):
"""
Return a replica of the frame, optionally with new frame attributes.
The replica is a new frame object that has the same data as this frame
object and with frame attributes overridden if they are provided as extra
keyword arguments to this method. If ``copy`` is set to `True` then a
copy of the internal arrays will be made. Otherwise the replica will
use a reference to the original arrays when possible to save memory. The
internal arrays are normally not changeable by the user so in most cases
it should not be necessary to set ``copy`` to `True`.
Parameters
----------
copy : bool, optional
If True, the resulting object is a copy of the data. When False,
references are used where possible. This rule also applies to the
frame attributes.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : `BaseCoordinateFrame` subclass instance
Replica of this object, but possibly with new frame attributes.
"""
return self._replicate(self.data, copy=copy, **kwargs)
def replicate_without_data(self, copy=False, **kwargs):
"""
Return a replica without data, optionally with new frame attributes.
The replica is a new frame object without data but with the same frame
attributes as this object, except where overridden by extra keyword
arguments to this method. The ``copy`` keyword determines if the frame
attributes are truly copied vs being references (which saves memory for
cases where frame attributes are large).
This method is essentially the converse of `realize_frame`.
Parameters
----------
copy : bool, optional
If True, the resulting object has copies of the frame attributes.
When False, references are used where possible.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : `BaseCoordinateFrame` subclass instance
Replica of this object, but without data and possibly with new frame
attributes.
"""
return self._replicate(None, copy=copy, **kwargs)
def realize_frame(self, data, **kwargs):
"""
Generates a new frame with new data from another frame (which may or
may not have data). Roughly speaking, the converse of
`replicate_without_data`.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation`
The representation to use as the data for the new frame.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object. In particular, `representation_type` can be specified.
Returns
-------
frameobj : `BaseCoordinateFrame` subclass instance
A new object in *this* frame, with the same frame attributes as
this one, but with the ``data`` as the coordinate data.
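        Examples
        --------
        A minimal usage sketch, turning a data-less frame into one with data:

        >>> from astropy.coordinates import ICRS, SphericalRepresentation
        >>> import astropy.units as u
        >>> data = SphericalRepresentation(10 * u.deg, 20 * u.deg, 1. * u.kpc)
        >>> icrs = ICRS().realize_frame(data)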
"""
return self._replicate(data, **kwargs)
def represent_as(self, base, s="base", in_frame_units=False):
"""
Generate and return a new representation of this frame's `data`
as a Representation object.
Note: In order to make an in-place change of the representation
        of a Frame or SkyCoord object, set the ``representation_type``
attribute of that object to the desired new representation, or
use the ``set_representation_cls`` method to also set the differential.
Parameters
----------
base : subclass of BaseRepresentation or string
The type of representation to generate. Must be a *class*
(not an instance), or the string name of the representation
class.
s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional
Class in which any velocities should be represented. Must be
a *class* (not an instance), or the string name of the
differential class. If equal to 'base' (default), inferred from
the base class. If `None`, all velocity information is dropped.
in_frame_units : bool, keyword-only
Force the representation units to match the specified units
            particular to this frame.
Returns
-------
newrep : BaseRepresentation-derived object
A new representation object of this frame's `data`.
Raises
------
AttributeError
If this object had no `data`
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> coord = SkyCoord(0*u.deg, 0*u.deg)
>>> coord.represent_as(CartesianRepresentation) # doctest: +FLOAT_CMP
<CartesianRepresentation (x, y, z) [dimensionless]
(1., 0., 0.)>
>>> coord.representation_type = CartesianRepresentation
>>> coord # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (x, y, z) [dimensionless]
(1., 0., 0.)>
"""
# For backwards compatibility (because in_frame_units used to be the
        # 2nd argument), we check to see if `s` is a boolean. If it is, we
        # treat it as the `in_frame_units` value and warn about the position
        # change
if isinstance(s, bool):
warnings.warn(
"The argument position for `in_frame_units` in `represent_as` has"
" changed. Use as a keyword argument if needed.",
AstropyWarning,
)
in_frame_units = s
s = "base"
# In the future, we may want to support more differentials, in which
# case one probably needs to define **kwargs above and use it here.
# But for now, we only care about the velocity.
repr_classes = _get_repr_classes(base=base, s=s)
representation_cls = repr_classes["base"]
# We only keep velocity information
if "s" in self.data.differentials:
# For the default 'base' option in which _get_repr_classes has
# given us a best guess based on the representation class, we only
# use it if the class we had already is incompatible.
if s == "base" and (
self.data.differentials["s"].__class__
in representation_cls._compatible_differentials
):
differential_cls = self.data.differentials["s"].__class__
else:
differential_cls = repr_classes["s"]
elif s is None or s == "base":
differential_cls = None
else:
raise TypeError(
"Frame data has no associated differentials (i.e. the frame has no"
" velocity data) - represent_as() only accepts a new representation."
)
if differential_cls:
cache_key = (
representation_cls.__name__,
differential_cls.__name__,
in_frame_units,
)
else:
cache_key = (representation_cls.__name__, in_frame_units)
cached_repr = self.cache["representation"].get(cache_key)
if not cached_repr:
if differential_cls:
# Sanity check to ensure we do not just drop radial
# velocity. TODO: should Representation.represent_as
# allow this transformation in the first place?
if (
isinstance(self.data, r.UnitSphericalRepresentation)
and issubclass(representation_cls, r.CartesianRepresentation)
and not isinstance(
self.data.differentials["s"],
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential,
),
)
):
raise u.UnitConversionError(
"need a distance to retrieve a cartesian representation "
"when both radial velocity and proper motion are present, "
"since otherwise the units cannot match."
)
# TODO NOTE: only supports a single differential
data = self.data.represent_as(representation_cls, differential_cls)
diff = data.differentials["s"] # TODO: assumes velocity
else:
data = self.data.represent_as(representation_cls)
# If the new representation is known to this frame and has a defined
# set of names and units, then use that.
new_attrs = self.representation_info.get(representation_cls)
if new_attrs and in_frame_units:
datakwargs = {comp: getattr(data, comp) for comp in data.components}
for comp, new_attr_unit in zip(data.components, new_attrs["units"]):
if new_attr_unit:
datakwargs[comp] = datakwargs[comp].to(new_attr_unit)
data = data.__class__(copy=False, **datakwargs)
if differential_cls:
# the original differential
data_diff = self.data.differentials["s"]
# If the new differential is known to this frame and has a
# defined set of names and units, then use that.
new_attrs = self.representation_info.get(differential_cls)
if new_attrs and in_frame_units:
diffkwargs = {comp: getattr(diff, comp) for comp in diff.components}
for comp, new_attr_unit in zip(diff.components, new_attrs["units"]):
# Some special-casing to treat a situation where the
# input data has a UnitSphericalDifferential or a
# RadialDifferential. It is re-represented to the
# frame's differential class (which might be, e.g., a
# dimensional Differential), so we don't want to try to
# convert the empty component units
if (
isinstance(
data_diff,
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
),
)
and comp not in data_diff.__class__.attr_classes
):
continue
elif (
isinstance(data_diff, r.RadialDifferential)
and comp not in data_diff.__class__.attr_classes
):
continue
# Try to convert to requested units. Since that might
# not be possible (e.g., for a coordinate with proper
# motion but without distance, one cannot convert to a
# cartesian differential in km/s), we allow the unit
# conversion to fail. See gh-7028 for discussion.
if new_attr_unit and hasattr(diff, comp):
try:
diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit)
except Exception:
pass
diff = diff.__class__(copy=False, **diffkwargs)
# Here we have to bypass using with_differentials() because
# it has a validation check. But because
# .representation_type and .differential_type don't point to
# the original classes, if the input differential is a
# RadialDifferential, it usually gets turned into a
# SphericalCosLatDifferential (or whatever the default is)
# with strange units for the d_lon and d_lat attributes.
# This then causes the dictionary key check to fail (i.e.
# comparison against `diff._get_deriv_key()`)
data._differentials.update({"s": diff})
self.cache["representation"][cache_key] = data
return self.cache["representation"][cache_key]
def transform_to(self, new_frame):
"""
Transform this object's coordinate data to a new frame.
Parameters
----------
new_frame : coordinate-like or `BaseCoordinateFrame` subclass instance
The frame to transform this coordinate frame into.
The frame class option is deprecated.
Returns
-------
transframe : coordinate-like
A new object with the coordinate data represented in the
            ``new_frame`` system.
Raises
------
ValueError
If there is no possible transformation route.
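        Examples
        --------
        A short sketch using two of the built-in frames:

        >>> from astropy.coordinates import ICRS, Galactic
        >>> import astropy.units as u
        >>> icrs = ICRS(ra=10 * u.deg, dec=20 * u.deg)
        >>> gal = icrs.transform_to(Galactic())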
"""
from .errors import ConvertError
if self._data is None:
raise ValueError("Cannot transform a frame with no data")
if (
getattr(self.data, "differentials", None)
and hasattr(self, "obstime")
and hasattr(new_frame, "obstime")
and np.any(self.obstime != new_frame.obstime)
):
raise NotImplementedError(
"You cannot transform a frame that has velocities to another frame at a"
" different obstime. If you think this should (or should not) be"
" possible, please comment at"
" https://github.com/astropy/astropy/issues/6280"
)
if inspect.isclass(new_frame):
warnings.warn(
"Transforming a frame instance to a frame class (as opposed to another "
"frame instance) will not be supported in the future. Either "
"explicitly instantiate the target frame, or first convert the source "
"frame instance to a `astropy.coordinates.SkyCoord` and use its "
"`transform_to()` method.",
AstropyDeprecationWarning,
)
# Use the default frame attributes for this class
new_frame = new_frame()
if hasattr(new_frame, "_sky_coord_frame"):
# Input new_frame is not a frame instance or class and is most
# likely a SkyCoord object.
new_frame = new_frame._sky_coord_frame
trans = frame_transform_graph.get_transform(self.__class__, new_frame.__class__)
if trans is None:
if new_frame is self.__class__:
# no special transform needed, but should update frame info
return new_frame.realize_frame(self.data)
msg = "Cannot transform from {0} to {1}"
raise ConvertError(msg.format(self.__class__, new_frame.__class__))
return trans(self, new_frame)
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : `BaseCoordinateFrame` subclass or instance
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
new_frame_cls = new_frame if inspect.isclass(new_frame) else new_frame.__class__
trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls)
if trans is None:
if new_frame_cls is self.__class__:
return "same"
else:
return False
else:
return True
def is_frame_attr_default(self, attrnm):
"""
Determine whether or not a frame attribute has its value because it's
the default value, or because this frame was created with that value
explicitly requested.
Parameters
----------
attrnm : str
The name of the attribute to check.
Returns
-------
isdefault : bool
True if the attribute ``attrnm`` has its value by default, False if
it was specified at creation of this frame.
"""
return attrnm in self._attr_names_with_defaults
@staticmethod
def _frameattr_equiv(left_fattr, right_fattr):
"""
Determine if two frame attributes are equivalent. Implemented as a
staticmethod mainly as a convenient location, although conceivably it
might be desirable for subclasses to override this behavior.
Primary purpose is to check for equality of representations. This
aspect can actually be simplified/removed now that representations have
equality defined.
Secondary purpose is to check for equality of coordinate attributes,
which first checks whether they themselves are in equivalent frames
before checking for equality in the normal fashion. This is because
checking for equality with non-equivalent frames raises an error.
"""
if left_fattr is right_fattr:
# shortcut if it's exactly the same object
return True
elif left_fattr is None or right_fattr is None:
# shortcut if one attribute is unspecified and the other isn't
return False
left_is_repr = isinstance(left_fattr, r.BaseRepresentationOrDifferential)
right_is_repr = isinstance(right_fattr, r.BaseRepresentationOrDifferential)
if left_is_repr and right_is_repr:
# both are representations.
if getattr(left_fattr, "differentials", False) or getattr(
right_fattr, "differentials", False
):
warnings.warn(
"Two representation frame attributes were checked for equivalence"
" when at least one of them has differentials. This yields False"
" even if the underlying representations are equivalent (although"
" this may change in future versions of Astropy)",
AstropyWarning,
)
return False
if isinstance(right_fattr, left_fattr.__class__):
# if same representation type, compare components.
return np.all(
[
(getattr(left_fattr, comp) == getattr(right_fattr, comp))
for comp in left_fattr.components
]
)
else:
# convert to cartesian and see if they match
return np.all(
left_fattr.to_cartesian().xyz == right_fattr.to_cartesian().xyz
)
elif left_is_repr or right_is_repr:
return False
left_is_coord = isinstance(left_fattr, BaseCoordinateFrame)
right_is_coord = isinstance(right_fattr, BaseCoordinateFrame)
if left_is_coord and right_is_coord:
# both are coordinates
if left_fattr.is_equivalent_frame(right_fattr):
return np.all(left_fattr == right_fattr)
else:
return False
elif left_is_coord or right_is_coord:
return False
return np.all(left_fattr == right_fattr)
def is_equivalent_frame(self, other):
"""
Checks if this object is the same frame as the ``other`` object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. Note that it does *not* matter what, if any,
data either object has.
Parameters
----------
other : :class:`~astropy.coordinates.BaseCoordinateFrame`
the other frame to check
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `BaseCoordinateFrame` or subclass.
"""
if self.__class__ == other.__class__:
for frame_attr_name in self.frame_attributes:
if not self._frameattr_equiv(
getattr(self, frame_attr_name), getattr(other, frame_attr_name)
):
return False
return True
elif not isinstance(other, BaseCoordinateFrame):
raise TypeError(
"Tried to do is_equivalent_frame on something that isn't a frame"
)
else:
return False
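# A minimal sketch, assuming the built-in FK5 frame: equivalence depends only
# on the frame attributes (here the equinox), not on any attached data.
# >>> from astropy.coordinates import FK5
# >>> FK5().is_equivalent_frame(FK5())
# True
# >>> FK5().is_equivalent_frame(FK5(equinox="J1975"))
# False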
def __repr__(self):
frameattrs = self._frame_attrs_repr()
data_repr = self._data_repr()
if frameattrs:
frameattrs = f" ({frameattrs})"
if data_repr:
return f"<{self.__class__.__name__} Coordinate{frameattrs}: {data_repr}>"
else:
return f"<{self.__class__.__name__} Frame{frameattrs}>"
def _data_repr(self):
"""Returns a string representation of the coordinate data."""
if not self.has_data:
return ""
if self.representation_type:
if hasattr(self.representation_type, "_unit_representation") and isinstance(
self.data, self.representation_type._unit_representation
):
rep_cls = self.data.__class__
else:
rep_cls = self.representation_type
if "s" in self.data.differentials:
dif_cls = self.get_representation_cls("s")
dif_data = self.data.differentials["s"]
if isinstance(
dif_data,
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential,
),
):
dif_cls = dif_data.__class__
else:
dif_cls = None
data = self.represent_as(rep_cls, dif_cls, in_frame_units=True)
data_repr = repr(data)
# Generate the list of component names out of the repr string
part1, _, remainder = data_repr.partition("(")
if remainder != "":
comp_str, _, part2 = remainder.partition(")")
comp_names = comp_str.split(", ")
# Swap in frame-specific component names
invnames = {
nmrepr: nmpref
for nmpref, nmrepr in self.representation_component_names.items()
}
for i, name in enumerate(comp_names):
comp_names[i] = invnames.get(name, name)
# Reassemble the repr string
data_repr = part1 + "(" + ", ".join(comp_names) + ")" + part2
else:
data = self.data
data_repr = repr(self.data)
if data_repr.startswith("<" + data.__class__.__name__):
# remove both the leading "<" and the space after the name, as well
# as the trailing ">"
data_repr = data_repr[(len(data.__class__.__name__) + 2) : -1]
else:
data_repr = "Data:\n" + data_repr
if "s" in self.data.differentials:
data_repr_spl = data_repr.split("\n")
if "has differentials" in data_repr_spl[-1]:
diffrepr = repr(data.differentials["s"]).split("\n")
if diffrepr[0].startswith("<"):
diffrepr[0] = " " + " ".join(diffrepr[0].split(" ")[1:])
for frm_nm, rep_nm in self.get_representation_component_names(
"s"
).items():
diffrepr[0] = diffrepr[0].replace(rep_nm, frm_nm)
if diffrepr[-1].endswith(">"):
diffrepr[-1] = diffrepr[-1][:-1]
data_repr_spl[-1] = "\n".join(diffrepr)
data_repr = "\n".join(data_repr_spl)
return data_repr
def _frame_attrs_repr(self):
"""
Returns a string representation of the frame's attributes, if any.
"""
attr_strs = []
for attribute_name in self.frame_attributes:
attr = getattr(self, attribute_name)
# Check to see if this object has a way of representing itself
# specific to being an attribute of a frame. (Note, this is not the
# Attribute class, it's the actual object).
if hasattr(attr, "_astropy_repr_in_frame"):
attrstr = attr._astropy_repr_in_frame()
else:
attrstr = str(attr)
attr_strs.append(f"{attribute_name}={attrstr}")
return ", ".join(attr_strs)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
new = super().__new__(self.__class__)
if hasattr(self, "_representation"):
new._representation = self._representation.copy()
new._attr_names_with_defaults = self._attr_names_with_defaults.copy()
for attr in self.frame_attributes:
_attr = "_" + attr
if attr in self._attr_names_with_defaults:
setattr(new, _attr, getattr(self, _attr))
else:
value = getattr(self, _attr)
if getattr(value, "shape", ()):
value = apply_method(value)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, _attr, value)
if self.has_data:
new._data = apply_method(self.data)
else:
new._data = None
shapes = [
getattr(new, "_" + attr).shape
for attr in new.frame_attributes
if (
attr not in new._attr_names_with_defaults
and getattr(getattr(new, "_" + attr), "shape", ())
)
]
if shapes:
new._no_data_shape = (
check_broadcast(*shapes) if len(shapes) > 1 else shapes[0]
)
else:
new._no_data_shape = ()
return new
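# A minimal sketch of the shape-changing machinery that goes through _apply,
# assuming an ICRS frame holding array-valued data:
# >>> import numpy as np
# >>> import astropy.units as u
# >>> from astropy.coordinates import ICRS
# >>> c = ICRS(ra=np.arange(6) * u.deg, dec=np.zeros(6) * u.deg)
# >>> c.reshape(2, 3).shape   # reshape is routed through _apply
# (2, 3)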
def __setitem__(self, item, value):
if self.__class__ is not value.__class__:
raise TypeError(
f"can only set from object of same class: {self.__class__.__name__} vs."
f" {value.__class__.__name__}"
)
if not self.is_equivalent_frame(value):
raise ValueError("can only set frame item from an equivalent frame")
if value._data is None:
raise ValueError("can only set frame with value that has data")
if self._data is None:
raise ValueError("cannot set frame which has no data")
if self.shape == ():
raise TypeError(
f"scalar '{self.__class__.__name__}' frame object "
"does not support item assignment"
)
if self._data is None:
raise ValueError("can only set frame if it has data")
if self._data.__class__ is not value._data.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self._data.__class__.__name__} vs. {value._data.__class__.__name__}"
)
if self._data._differentials:
# Can this ever occur? (Same class but different differential keys).
# This exception is not tested since it is not clear how to generate it.
if self._data._differentials.keys() != value._data._differentials.keys():
raise ValueError("setitem value must have same differentials")
for key, self_diff in self._data._differentials.items():
if self_diff.__class__ is not value._data._differentials[key].__class__:
raise TypeError(
"can only set from object of same class: "
f"{self_diff.__class__.__name__} vs. "
f"{value._data._differentials[key].__class__.__name__}"
)
# Set representation data
self._data[item] = value._data
# Frame attributes required to be identical by is_equivalent_frame,
# no need to set them here.
self.cache.clear()
def __dir__(self):
"""
Override the builtin `dir` behavior to include representation
names.
TODO: dynamic representation transforms (i.e. include cylindrical et al.).
"""
return sorted(
set(super().__dir__())
| set(self.representation_component_names)
| set(self.get_representation_component_names("s"))
)
def __getattr__(self, attr):
"""
Allow access to attributes on the representation and differential as
found via ``self.get_representation_component_names``.
TODO: We should handle dynamic representation transforms here (e.g.,
`.cylindrical`) instead of defining properties as below.
"""
# attr == '_representation' is likely from the hasattr() test in the
# representation property which is used for
# self.representation_component_names.
#
# Prevent infinite recursion here.
if attr.startswith("_"):
return self.__getattribute__(attr) # Raise AttributeError.
repr_names = self.representation_component_names
if attr in repr_names:
if self._data is None:
self.data # this raises the "no data" error by design - doing it
# this way means we don't have to replicate the error message here
rep = self.represent_as(self.representation_type, in_frame_units=True)
val = getattr(rep, repr_names[attr])
return val
diff_names = self.get_representation_component_names("s")
if attr in diff_names:
if self._data is None:
self.data # see above.
# TODO: this doesn't work for the case when there is only
# unitspherical information. The differential_type gets set to the
# default_differential, which expects full information, so the
# units don't work out
rep = self.represent_as(
in_frame_units=True, **self.get_representation_cls(None)
)
val = getattr(rep.differentials["s"], diff_names[attr])
return val
return self.__getattribute__(attr) # Raise AttributeError.
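# A minimal sketch of component access resolved by __getattr__, assuming ICRS
# maps its representation components to the names ``ra`` and ``dec``:
# >>> import astropy.units as u
# >>> from astropy.coordinates import ICRS
# >>> c = ICRS(ra=30 * u.deg, dec=45 * u.deg)
# >>> c.ra    # looked up via representation_component_names
# <Longitude 30. deg>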
def __setattr__(self, attr, value):
# Don't slow down access of private attributes!
if not attr.startswith("_"):
if hasattr(self, "representation_info"):
repr_attr_names = set()
for representation_attr in self.representation_info.values():
repr_attr_names.update(representation_attr["names"])
if attr in repr_attr_names:
raise AttributeError(f"Cannot set any frame attribute {attr}")
super().__setattr__(attr, value)
def __eq__(self, value):
"""Equality operator for frame.
This implements strict equality and requires that the frames are
equivalent and that the representation data are exactly equal.
"""
if not isinstance(value, BaseCoordinateFrame):
return NotImplemented
is_equiv = self.is_equivalent_frame(value)
if self._data is None and value._data is None:
# For Frame with no data, == compare is same as is_equivalent_frame()
return is_equiv
if not is_equiv:
raise TypeError(
"cannot compare: objects must have equivalent frames: "
f"{self.replicate_without_data()} vs. {value.replicate_without_data()}"
)
if (value._data is None) != (self._data is None):
raise ValueError(
"cannot compare: one frame has data and the other does not"
)
return self._data == value._data
def __ne__(self, value):
return np.logical_not(self == value)
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
from .angle_utilities import angular_separation
from .angles import Angle
self_unit_sph = self.represent_as(r.UnitSphericalRepresentation)
other_transformed = other.transform_to(self)
other_unit_sph = other_transformed.represent_as(r.UnitSphericalRepresentation)
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(
self_unit_sph.lon, self_unit_sph.lat, other_unit_sph.lon, other_unit_sph.lat
)
return Angle(sep, unit=u.degree)
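# A minimal sketch, assuming two ICRS positions one degree apart in right
# ascension on the celestial equator:
# >>> import astropy.units as u
# >>> from astropy.coordinates import ICRS
# >>> ICRS(ra=10 * u.deg, dec=0 * u.deg).separation(ICRS(ra=11 * u.deg, dec=0 * u.deg))
# <Angle 1. deg>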
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate system to get the distance to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
from .distances import Distance
if issubclass(self.data.__class__, r.UnitSphericalRepresentation):
raise ValueError(
"This object does not have a distance; cannot compute 3d separation."
)
# do this first just in case the conversion somehow creates a distance
other_in_self_system = other.transform_to(self)
if issubclass(other_in_self_system.data.__class__, r.UnitSphericalRepresentation):
raise ValueError(
"The other object does not have a distance; "
"cannot compute 3d separation."
)
# drop the differentials to ensure they don't do anything odd in the
# subtraction
self_car = self.data.without_differentials().represent_as(
r.CartesianRepresentation
)
other_car = other_in_self_system.data.without_differentials().represent_as(
r.CartesianRepresentation
)
dist = (self_car - other_car).norm()
if dist.unit == u.one:
return dist
else:
return Distance(dist)
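# A minimal sketch, assuming both coordinates carry a distance so a physical
# separation is defined:
# >>> import astropy.units as u
# >>> from astropy.coordinates import ICRS
# >>> c1 = ICRS(ra=0 * u.deg, dec=0 * u.deg, distance=10 * u.pc)
# >>> c2 = ICRS(ra=0 * u.deg, dec=0 * u.deg, distance=12 * u.pc)
# >>> c1.separation_3d(c2)
# <Distance 2. pc>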
@property
def cartesian(self):
"""
Shorthand for a cartesian representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("cartesian", in_frame_units=True)
@property
def cylindrical(self):
"""
Shorthand for a cylindrical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("cylindrical", in_frame_units=True)
@property
def spherical(self):
"""
Shorthand for a spherical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("spherical", in_frame_units=True)
@property
def sphericalcoslat(self):
"""
Shorthand for a spherical representation of the positional data and a
`SphericalCosLatDifferential` for the velocity data in this object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("spherical", "sphericalcoslat", in_frame_units=True)
@property
def velocity(self):
"""
Shorthand for retrieving the Cartesian space-motion as a
`CartesianDifferential` object. This is equivalent to calling
``self.cartesian.differentials['s']``.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
return self.cartesian.differentials["s"]
@property
def proper_motion(self):
"""
Shorthand for the two-dimensional proper motion as a
`~astropy.units.Quantity` object with angular velocity units. In the
returned `~astropy.units.Quantity`, ``axis=0`` is the longitude/latitude
dimension so that ``.proper_motion[0]`` is the longitudinal proper
motion and ``.proper_motion[1]`` is latitudinal. The longitudinal proper
motion already includes the cos(latitude) term.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
sph = self.represent_as("spherical", "sphericalcoslat", in_frame_units=True)
pm_lon = sph.differentials["s"].d_lon_coslat
pm_lat = sph.differentials["s"].d_lat
return (
np.stack((pm_lon.value, pm_lat.to(pm_lon.unit).value), axis=0) * pm_lon.unit
)
@property
def radial_velocity(self):
"""
Shorthand for the radial or line-of-sight velocity as a
`~astropy.units.Quantity` object.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
sph = self.represent_as("spherical", in_frame_units=True)
return sph.differentials["s"].d_distance
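# A minimal sketch of the three velocity shorthands above, assuming an ICRS
# position with proper-motion and radial-velocity components attached:
# >>> import astropy.units as u
# >>> from astropy.coordinates import ICRS
# >>> c = ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=100 * u.pc,
# ...          pm_ra_cosdec=5 * u.mas / u.yr, pm_dec=-3 * u.mas / u.yr,
# ...          radial_velocity=17 * u.km / u.s)
# >>> c.proper_motion     # (longitudinal incl. cos(lat), latitudinal)
# <Quantity [ 5., -3.] mas / yr>
# >>> c.radial_velocity
# <Quantity 17. km / s>
# >>> c.velocity          # full Cartesian space motion (CartesianDifferential)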
class GenericFrame(BaseCoordinateFrame):
"""
A frame object that can't store data but can hold any arbitrary frame
attributes. Mostly useful as a utility for the high-level class to store
intermediate frame attributes.
Parameters
----------
frame_attrs : dict
A dictionary of attributes to be used as the frame attributes for this
frame.
"""
name = None # it's not a "real" frame so it doesn't have a name
def __init__(self, frame_attrs):
self.frame_attributes = {}
for name, default in frame_attrs.items():
self.frame_attributes[name] = Attribute(default)
setattr(self, "_" + name, default)
super().__init__(None)
def __getattr__(self, name):
if "_" + name in self.__dict__:
return getattr(self, "_" + name)
else:
raise AttributeError(f"no {name}")
def __setattr__(self, name, value):
if name in self.frame_attributes:
raise AttributeError(f"can't set frame attribute '{name}'")
else:
super().__setattr__(name, value)
|
8281597376892d0fb8efc9a5c39c6eb3cd50ea92fd33c5511e41a63765937ff0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
import json
import socket
import urllib.error
import urllib.parse
import urllib.request
from warnings import warn
import erfa
import numpy as np
from astropy import constants as consts
from astropy import units as u
from astropy.units.quantity import QuantityInfoBase
from astropy.utils import data
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle, Latitude, Longitude
from .errors import UnknownSiteException
from .matrix_utilities import matrix_transpose
from .representation import (
BaseRepresentation,
CartesianDifferential,
CartesianRepresentation,
)
__all__ = [
"EarthLocation",
"BaseGeodeticRepresentation",
"WGS84GeodeticRepresentation",
"WGS72GeodeticRepresentation",
"GRS80GeodeticRepresentation",
]
GeodeticLocation = collections.namedtuple("GeodeticLocation", ["lon", "lat", "height"])
ELLIPSOIDS = {}
"""Available ellipsoids (defined in erfam.h, with numbers exposed in erfa)."""
# Note: they get filled by the creation of the geodetic classes.
OMEGA_EARTH = (1.002_737_811_911_354_48 * u.cycle / u.day).to(
1 / u.s, u.dimensionless_angles()
)
"""
Rotational velocity of Earth, following SOFA's pvtob.
In UT1 seconds, this would be 2 pi / (24 * 3600), but we need the value
in SI seconds, so multiply by the ratio of stellar to solar day.
See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth
Seidelmann (1992), University Science Books. The constant is the
conventional, exact one (IERS conventions 2003); see
http://hpiers.obspm.fr/eop-pc/index.php?index=constants.
"""
def _check_ellipsoid(ellipsoid=None, default="WGS84"):
if ellipsoid is None:
ellipsoid = default
if ellipsoid not in ELLIPSOIDS:
raise ValueError(f"Ellipsoid {ellipsoid} not among known ones ({ELLIPSOIDS})")
return ellipsoid
def _get_json_result(url, err_str, use_google):
# need to do this here to prevent a series of complicated circular imports
from .name_resolve import NameResolveError
try:
# Retrieve JSON response from Google maps API
resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
resp_data = json.loads(resp.read().decode("utf8"))
except urllib.error.URLError as e:
# This catches a timeout error, see:
# http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
if isinstance(e.reason, socket.timeout):
raise NameResolveError(err_str.format(msg="connection timed out")) from e
else:
raise NameResolveError(err_str.format(msg=e.reason)) from e
except socket.timeout:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
raise NameResolveError(err_str.format(msg="connection timed out"))
if use_google:
results = resp_data.get("results", [])
if resp_data.get("status", None) != "OK":
raise NameResolveError(
err_str.format(msg="unknown failure with Google API")
)
else: # OpenStreetMap returns a list
results = resp_data
if not results:
raise NameResolveError(err_str.format(msg="no results returned"))
return results
class EarthLocationInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("x", "y", "z", "ellipsoid")
def _construct_from_dict(self, map):
# Need to pop ellipsoid off and update post-instantiation. This is
# on the to-fix list in #4261.
ellipsoid = map.pop("ellipsoid")
out = self._parent_cls(**map)
out.ellipsoid = ellipsoid
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new EarthLocation instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : EarthLocation (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Very similar to QuantityInfo.new_like, but the creation of the
# map is different enough that this needs its own routine.
# Get merged info attributes shape, dtype, format, description.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# The above raises an error if the dtypes do not match, but returns
# just the string representation, which is not useful, so remove.
attrs.pop("dtype")
# Make empty EarthLocation using the dtype and unit of the last column.
# Use zeros so we do not get problems for possible conversion to
# geodetic coordinates.
shape = (length,) + attrs.pop("shape")
data = u.Quantity(
np.zeros(shape=shape, dtype=cols[0].dtype), unit=cols[0].unit, copy=False
)
# Get arguments needed to reconstruct class
map = {
key: (data[key] if key in "xyz" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class EarthLocation(u.Quantity):
"""
Location on the Earth.
Initialization is first attempted assuming geocentric (x, y, z) coordinates
are given; if that fails, another attempt is made assuming geodetic
coordinates (longitude, latitude, height above a reference ellipsoid).
When using the geodetic forms, Longitudes are measured increasing to the
east, so west longitudes are negative. Internally, the coordinates are
stored as geocentric.
To ensure a specific type of coordinates is used, use the corresponding
class methods (`from_geocentric` and `from_geodetic`) or initialize the
arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``,
``height`` for geodetic). See the class methods for details.
Notes
-----
This class fits into the coordinates transformation framework in that it
encodes a position on the `~astropy.coordinates.ITRS` frame. To get a
proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs``
property.
"""
_ellipsoid = "WGS84"
_location_dtype = np.dtype({"names": ["x", "y", "z"], "formats": [np.float64] * 3})
_array_dtype = np.dtype((np.float64, (3,)))
info = EarthLocationInfo()
def __new__(cls, *args, **kwargs):
# TODO: needs copy argument and better dealing with inputs.
if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], EarthLocation):
return args[0].copy()
try:
self = cls.from_geocentric(*args, **kwargs)
except (u.UnitsError, TypeError) as exc_geocentric:
try:
self = cls.from_geodetic(*args, **kwargs)
except Exception as exc_geodetic:
raise TypeError(
"Coordinates could not be parsed as either "
"geocentric or geodetic, with respective "
f'exceptions "{exc_geocentric}" and "{exc_geodetic}"'
)
return self
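# A minimal sketch of the two construction paths tried above (geocentric
# first, then geodetic), assuming the default WGS84 ellipsoid:
# >>> import astropy.units as u
# >>> from astropy.coordinates import EarthLocation
# >>> EarthLocation(1000 * u.km, 2000 * u.km, 3000 * u.km)        # geocentric x, y, z
# >>> EarthLocation(lon=-17.9 * u.deg, lat=28.8 * u.deg, height=2200 * u.m)  # geodetic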
@classmethod
def from_geocentric(cls, x, y, z, unit=None):
"""
Location on Earth, initialized from geocentric coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array-like
Cartesian coordinates. If not quantities, ``unit`` should be given.
unit : unit-like or None
Physical unit of the coordinate values. If ``x``, ``y``, and/or
``z`` are quantities, they will be converted to this unit.
Raises
------
astropy.units.UnitsError
If the units on ``x``, ``y``, and ``z`` do not match or an invalid
unit is given.
ValueError
If the shapes of ``x``, ``y``, and ``z`` do not match.
TypeError
If ``x`` is not a `~astropy.units.Quantity` and no unit is given.
"""
if unit is None:
try:
unit = x.unit
except AttributeError:
raise TypeError(
"Geocentric coordinates should be Quantities "
"unless an explicit unit is given."
) from None
else:
unit = u.Unit(unit)
if unit.physical_type != "length":
raise u.UnitsError("Geocentric coordinates should be in units of length.")
try:
x = u.Quantity(x, unit, copy=False)
y = u.Quantity(y, unit, copy=False)
z = u.Quantity(z, unit, copy=False)
except u.UnitsError:
raise u.UnitsError("Geocentric coordinate units should all be consistent.")
x, y, z = np.broadcast_arrays(x, y, z)
struc = np.empty(x.shape, cls._location_dtype)
struc["x"], struc["y"], struc["z"] = x, y, z
return super().__new__(cls, struc, unit, copy=False)
@classmethod
def from_geodetic(cls, lon, lat, height=0.0, ellipsoid=None):
"""
Location on Earth, initialized from geodetic coordinates.
Parameters
----------
lon : `~astropy.coordinates.Longitude` or float
Earth East longitude. Can be anything that initialises an
`~astropy.coordinates.Angle` object (if float, in degrees).
lat : `~astropy.coordinates.Latitude` or float
Earth latitude. Can be anything that initialises an
`~astropy.coordinates.Latitude` object (if float, in degrees).
height : `~astropy.units.Quantity` ['length'] or float, optional
Height above reference ellipsoid (if float, in meters; default: 0).
ellipsoid : str, optional
Name of the reference ellipsoid to use (default: 'WGS84').
Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'.
Raises
------
astropy.units.UnitsError
If the units on ``lon`` and ``lat`` are inconsistent with angular
ones, or that on ``height`` with a length.
ValueError
If ``lon``, ``lat``, and ``height`` do not have the same shape, or
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geocentric coordinates, the ERFA routine
``gd2gc`` is used. See https://github.com/liberfa/erfa
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid)
# As wrapping fails on readonly input, we do so manually
lon = Angle(lon, u.degree, copy=False).wrap_at(180 * u.degree)
lat = Latitude(lat, u.degree, copy=False)
# don't convert to m by default, so we can use the height unit below.
if not isinstance(height, u.Quantity):
height = u.Quantity(height, u.m, copy=False)
# get geocentric coordinates.
geodetic = ELLIPSOIDS[ellipsoid](lon, lat, height, copy=False)
xyz = geodetic.to_cartesian().get_xyz(xyz_axis=-1) << height.unit
self = xyz.view(cls._location_dtype, cls).reshape(geodetic.shape)
self._ellipsoid = ellipsoid
return self
@classmethod
def of_site(cls, site_name):
"""
Return an object of this class for a known observatory/site by name.
This is intended as a quick convenience function to get basic site
information, not a fully-featured exhaustive registry of observatories
and all their properties.
Additional information about the site is stored in the ``.info.meta``
dictionary of sites obtained using this method (see the examples below).
.. note::
When this function is called, it will attempt to download site
information from the astropy data server. If you would like a site
to be added, issue a pull request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
If a site cannot be found in the registry (i.e., an internet
connection is not available), it will fall back on a built-in list.
In the future, this bundled list might include a version-controlled
list of canonical observatories extracted from the online version,
but it currently only contains the Greenwich Royal Observatory as an
example case.
Parameters
----------
site_name : str
Name of the observatory (case-insensitive).
Returns
-------
site : `~astropy.coordinates.EarthLocation` (or subclass) instance
The location of the observatory. The returned class will be the same
as this class.
Examples
--------
>>> from astropy.coordinates import EarthLocation
>>> keck = EarthLocation.of_site('Keck Observatory') # doctest: +REMOTE_DATA
>>> keck.geodetic # doctest: +REMOTE_DATA +FLOAT_CMP
GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>)
>>> keck.info # doctest: +REMOTE_DATA
name = W. M. Keck Observatory
dtype = (float64, float64, float64)
unit = m
class = EarthLocation
n_bad = 0
>>> keck.info.meta # doctest: +REMOTE_DATA
{'source': 'IRAF Observatory Database', 'timezone': 'US/Hawaii'}
See Also
--------
get_site_names : the list of sites that this function can access
""" # noqa: E501
registry = cls._get_site_registry()
try:
el = registry[site_name]
except UnknownSiteException as e:
raise UnknownSiteException(
e.site, "EarthLocation.get_site_names", close_names=e.close_names
) from e
if cls is el.__class__:
return el
else:
newel = cls.from_geodetic(*el.to_geodetic())
newel.info.name = el.info.name
return newel
@classmethod
def of_address(cls, address, get_height=False, google_api_key=None):
"""
Return an object of this class for a given address by querying either
the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding
API [2]_, which requires a specified API key.
This is intended as a quick convenience function to get easy access to
locations. If you need to specify a precise location, you should use the
initializer directly and pass in a longitude, latitude, and elevation.
In the background, this just issues a web query to either of
the APIs noted above. This is not meant to be abused! Both
OpenStreetMap and Google use IP-based query limiting and will ban your
IP if you send more than a few thousand queries per hour [2]_.
.. warning::
If the query returns more than one location (e.g., searching on
``address='springfield'``), this function will use the **first**
returned location.
Parameters
----------
address : str
The address to get the location for. As per the Google maps API,
this can be a fully specified street address (e.g., 123 Main St.,
New York, NY) or a city name (e.g., Danbury, CT), or etc.
get_height : bool, optional
This only works when using the Google API! See the ``google_api_key``
block below. Use the retrieved location to perform a second query to
the Google maps elevation API to retrieve the height of the input
address [3]_.
google_api_key : str, optional
A Google API key with the Geocoding API and (optionally) the
elevation API enabled. See [4]_ for more information.
Returns
-------
location : `~astropy.coordinates.EarthLocation` (or subclass) instance
The location of the input address.
Will be the same type as this class.
References
----------
.. [1] https://nominatim.openstreetmap.org/
.. [2] https://developers.google.com/maps/documentation/geocoding/start
.. [3] https://developers.google.com/maps/documentation/elevation/start
.. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key
"""
use_google = google_api_key is not None
# Fail fast if invalid options are passed:
if not use_google and get_height:
raise ValueError(
"Currently, `get_height` only works when using the Google geocoding"
" API, which requires passing a Google API key with `google_api_key`."
" See:"
" https://developers.google.com/maps/documentation/geocoding/get-api-key"
" for information on obtaining an API key."
)
if use_google: # Google
pars = urllib.parse.urlencode({"address": address, "key": google_api_key})
geo_url = f"https://maps.googleapis.com/maps/api/geocode/json?{pars}"
else: # OpenStreetMap
pars = urllib.parse.urlencode({"q": address, "format": "json"})
geo_url = f"https://nominatim.openstreetmap.org/search?{pars}"
# get longitude and latitude location
err_str = f"Unable to retrieve coordinates for address '{address}'; {{msg}}"
geo_result = _get_json_result(geo_url, err_str=err_str, use_google=use_google)
if use_google:
loc = geo_result[0]["geometry"]["location"]
lat = loc["lat"]
lon = loc["lng"]
else:
loc = geo_result[0]
lat = float(loc["lat"]) # strings are returned by OpenStreetMap
lon = float(loc["lon"])
if get_height:
pars = {"locations": f"{lat:.8f},{lon:.8f}", "key": google_api_key}
pars = urllib.parse.urlencode(pars)
ele_url = f"https://maps.googleapis.com/maps/api/elevation/json?{pars}"
err_str = f"Unable to retrieve elevation for address '{address}'; {{msg}}"
ele_result = _get_json_result(
ele_url, err_str=err_str, use_google=use_google
)
height = ele_result[0]["elevation"] * u.meter
else:
height = 0.0
return cls.from_geodetic(lon=lon * u.deg, lat=lat * u.deg, height=height)
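# A minimal usage sketch, assuming network access to the default OpenStreetMap
# Nominatim backend (no Google API key given):
# >>> from astropy.coordinates import EarthLocation
# >>> loc = EarthLocation.of_address("Greenwich Royal Observatory")  # doctest: +REMOTE_DATA
# >>> loc.lat, loc.lon   # geodetic coordinates of the first match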
@classmethod
def get_site_names(cls):
"""
Get list of names of observatories for use with
`~astropy.coordinates.EarthLocation.of_site`.
.. note::
When this function is called, it will first attempt to
download site information from the astropy data server. If it
cannot (i.e., an internet connection is not available), it will fall
back on the list included with astropy (which is a limited and dated
set of sites). If you think a site should be added, issue a pull
request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
Returns
-------
names : list of str
List of valid observatory names
See Also
--------
of_site : Gets the actual location object for one of the sites names
this returns.
"""
return cls._get_site_registry().names
@classmethod
def _get_site_registry(cls, force_download=False, force_builtin=False):
"""
Gets the site registry. The first time this either downloads or loads
from the data file packaged with astropy. Subsequent calls will use the
cached version unless explicitly overridden.
Parameters
----------
force_download : bool or str
If not False, force replacement of the cached registry with a
downloaded version. If a str, that will be used as the URL to
download from (if just True, the default URL will be used).
force_builtin : bool
If True, load from the data file bundled with astropy and set the
cache to that.
Returns
-------
reg : astropy.coordinates.sites.SiteRegistry
"""
# need to do this here at the bottom to avoid circular dependencies
from .sites import get_builtin_sites, get_downloaded_sites
if force_builtin and force_download:
raise ValueError("Cannot have both force_builtin and force_download True")
if force_builtin:
reg = cls._site_registry = get_builtin_sites()
else:
reg = getattr(cls, "_site_registry", None)
if force_download or not reg:
try:
if isinstance(force_download, str):
reg = get_downloaded_sites(force_download)
else:
reg = get_downloaded_sites()
except OSError:
if force_download:
raise
msg = (
"Could not access the online site list. Falling "
"back on the built-in version, which is rather "
"limited. If you want to retry the download, do "
"{0}._get_site_registry(force_download=True)"
)
warn(AstropyUserWarning(msg.format(cls.__name__)))
reg = get_builtin_sites()
cls._site_registry = reg
return reg
@property
def ellipsoid(self):
"""The default ellipsoid used to convert to geodetic coordinates."""
return self._ellipsoid
@ellipsoid.setter
def ellipsoid(self, ellipsoid):
self._ellipsoid = _check_ellipsoid(ellipsoid)
@property
def geodetic(self):
"""Convert to geodetic coordinates for the default ellipsoid."""
return self.to_geodetic()
def to_geodetic(self, ellipsoid=None):
"""Convert to geodetic coordinates.
Parameters
----------
ellipsoid : str, optional
Reference ellipsoid to use. Default is the one the coordinates
were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72'
Returns
-------
lon, lat, height : `~astropy.units.Quantity`
The tuple is a ``GeodeticLocation`` namedtuple and is comprised of
instances of `~astropy.coordinates.Longitude`,
`~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`.
Raises
------
ValueError
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geodetic coordinates, the ERFA routine
``gc2gd`` is used. See https://github.com/liberfa/erfa
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid)
xyz = self.view(self._array_dtype, u.Quantity)
llh = CartesianRepresentation(xyz, xyz_axis=-1, copy=False).represent_as(
ELLIPSOIDS[ellipsoid]
)
return GeodeticLocation(
Longitude(llh.lon, u.deg, wrap_angle=180 * u.deg, copy=False),
llh.lat << u.deg,
llh.height << self.unit,
)
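# A minimal round-trip sketch, assuming a location initialized on the default
# WGS84 ellipsoid and converted back using WGS72:
# >>> import astropy.units as u
# >>> from astropy.coordinates import EarthLocation
# >>> loc = EarthLocation.from_geodetic(lon=10 * u.deg, lat=45 * u.deg, height=0 * u.m)
# >>> lon, lat, height = loc.to_geodetic("WGS72")
# >>> height   # a few metres away from zero, since WGS72 differs from WGS84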
@property
def lon(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[0]
@property
def lat(self):
"""Latitude of the location, for the default ellipsoid."""
return self.geodetic[1]
@property
def height(self):
"""Height of the location, for the default ellipsoid."""
return self.geodetic[2]
# mostly for symmetry with geodetic and to_geodetic.
@property
def geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities"""
return self.to_geocentric()
def to_geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities"""
return (self.x, self.y, self.z)
def get_itrs(self, obstime=None):
"""
Generates an `~astropy.coordinates.ITRS` object with the location of
this object at the requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time` or None
The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
if None, the default ``obstime`` will be used.
Returns
-------
itrs : `~astropy.coordinates.ITRS`
The new object in the ITRS frame
"""
# Broadcast for a single position at multiple times, but don't attempt
# to be more general here.
if obstime and self.size == 1 and obstime.shape:
self = np.broadcast_to(self, obstime.shape, subok=True)
# do this here to prevent a series of complicated circular imports
from .builtin_frames import ITRS
return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)
itrs = property(
get_itrs,
doc="""An `~astropy.coordinates.ITRS` object
for the location of this object at the
default ``obstime``.""",
)
def get_gcrs(self, obstime):
"""GCRS position with velocity at ``obstime`` as a GCRS coordinate.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
-------
gcrs : `~astropy.coordinates.GCRS` instance
With velocity included.
"""
# do this here to prevent a series of complicated circular imports
from .builtin_frames import GCRS
loc, vel = self.get_gcrs_posvel(obstime)
loc.differentials["s"] = CartesianDifferential.from_cartesian(vel)
return GCRS(loc, obstime=obstime)
def _get_gcrs_posvel(self, obstime, ref_to_itrs, gcrs_to_ref):
"""Calculate GCRS position and velocity given transformation matrices.
The reference frame z axis must point to the Celestial Intermediate Pole
(as is the case for CIRS and TETE).
This private method is used in intermediate_rotation_transforms,
where some of the matrices are already available for the coordinate
transformation.
The method is faster by an order of magnitude than just adding a zero
velocity to ITRS and transforming to GCRS, because it avoids calculating
the velocity via finite differencing of the results of the transformation
at three separate times.
"""
# The simplest route is to transform to the reference frame where the
# z axis is properly aligned with the Earth's rotation axis (CIRS or
# TETE), then calculate the velocity, and then transform this
# reference position and velocity to GCRS. For speed, though, we
# transform the coordinates to GCRS in one step, and calculate the
# velocities by rotating around the earth's axis transformed to GCRS.
ref_to_gcrs = matrix_transpose(gcrs_to_ref)
itrs_to_gcrs = ref_to_gcrs @ matrix_transpose(ref_to_itrs)
# Earth's rotation vector in the ref frame is rot_vec_ref = (0,0,OMEGA_EARTH),
# so in GCRS it is rot_vec_gcrs[..., 2] @ OMEGA_EARTH.
rot_vec_gcrs = CartesianRepresentation(
ref_to_gcrs[..., 2] * OMEGA_EARTH, xyz_axis=-1, copy=False
)
# Get the position in the GCRS frame.
# Since we just need the cartesian representation of ITRS, avoid get_itrs().
itrs_cart = CartesianRepresentation(self.x, self.y, self.z, copy=False)
pos = itrs_cart.transform(itrs_to_gcrs)
vel = rot_vec_gcrs.cross(pos)
return pos, vel
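# In other words, with r the position of this location expressed in GCRS and
# omega the Earth rotation vector (magnitude OMEGA_EARTH along the GCRS
# direction of the reference frame's z axis), the velocity computed above is
# the rigid-body value v = omega x r.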
def get_gcrs_posvel(self, obstime):
"""
Calculate the GCRS position and velocity of this object at the
requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
-------
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`
The GCRS position of the object
obsgeovel : `~astropy.coordinates.CartesianRepresentation`
The GCRS velocity of the object
"""
# Local import to prevent circular imports.
from .builtin_frames.intermediate_rotation_transforms import (
cirs_to_itrs_mat,
gcrs_to_cirs_mat,
)
# Get gcrs_posvel by transforming via CIRS (slightly faster than TETE).
return self._get_gcrs_posvel(
obstime, cirs_to_itrs_mat(obstime), gcrs_to_cirs_mat(obstime)
)
def gravitational_redshift(
self, obstime, bodies=["sun", "jupiter", "moon"], masses={}
):
"""Return the gravitational redshift at this EarthLocation.
Calculates the gravitational redshift, of order 3 m/s, due to the
requested solar system bodies.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the redshift at.
bodies : iterable, optional
The bodies (other than the Earth) to include in the redshift
calculation. List elements should be any body name
`get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and
the Moon. Earth is always included (because the class represents
an *Earth* location).
masses : dict[str, `~astropy.units.Quantity`], optional
The mass or gravitational parameters (G * mass) to assume for the
bodies requested in ``bodies``. Can be used to override the
defaults for the Sun, Jupiter, the Moon, and the Earth, or to
pass in masses for other bodies.
Returns
-------
redshift : `~astropy.units.Quantity`
Gravitational redshift in velocity units at given obstime.
"""
# needs to be here to avoid circular imports
from .solar_system import get_body_barycentric
bodies = list(bodies)
# Ensure earth is included and last in the list.
if "earth" in bodies:
bodies.remove("earth")
bodies.append("earth")
_masses = {
"sun": consts.GM_sun,
"jupiter": consts.GM_jup,
"moon": consts.G * 7.34767309e22 * u.kg,
"earth": consts.GM_earth,
}
_masses.update(masses)
GMs = []
M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg))
for body in bodies:
try:
GMs.append(_masses[body].to(u.m**3 / u.s**2, [M_GM_equivalency]))
except KeyError as err:
raise KeyError(f'body "{body}" does not have a mass.') from err
except u.UnitsError as exc:
exc.args += (
(
'"masses" argument values must be masses or '
"gravitational parameters."
),
)
raise
positions = [get_body_barycentric(name, obstime) for name in bodies]
# Calculate distances to objects other than earth.
distances = [(pos - positions[-1]).norm() for pos in positions[:-1]]
# Append distance from Earth's center for Earth's contribution.
distances.append(CartesianRepresentation(self.geocentric).norm())
# Get redshifts due to all objects.
redshifts = [
-GM / consts.c / distance for (GM, distance) in zip(GMs, distances)
]
# Reverse order of summing, to go from small to big, and to get
# "earth" first, which gives m/s as unit.
return sum(redshifts[::-1])
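# Each term in the sum above is -GM_i / (c * d_i), the potential of body i at
# this location divided by the speed of light, expressed in velocity units.
# As a rough scale, the solar term at 1 au is GM_sun / (c * 1 au) ~ 3 m/s,
# which is the "order 3 m/s" quoted in the docstring.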
@property
def x(self):
"""The X component of the geocentric coordinates."""
return self["x"]
@property
def y(self):
"""The Y component of the geocentric coordinates."""
return self["y"]
@property
def z(self):
"""The Z component of the geocentric coordinates."""
return self["z"]
def __getitem__(self, item):
result = super().__getitem__(item)
if result.dtype is self.dtype:
return result.view(self.__class__)
else:
return result.view(u.Quantity)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, "_ellipsoid"):
self._ellipsoid = obj._ellipsoid
def __len__(self):
if self.shape == ():
raise IndexError("0-d EarthLocation arrays cannot be indexed")
else:
return super().__len__()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
# Conversion to another unit in both ``to`` and ``to_value`` goes
# via this routine. To make the regular quantity routines work, we
# temporarily turn the structured array into a regular one.
array_view = self.view(self._array_dtype, np.ndarray)
if equivalencies == []:
equivalencies = self._equivalencies
new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
return new_array.view(self.dtype).reshape(self.shape)
geodetic_base_doc = """{__doc__}
Parameters
----------
lon, lat : angle-like
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle` and either
`~astropy.coordinates.Longitude` or `~astropy.coordinates.Latitude`,
depending on the parameter.
height : `~astropy.units.Quantity` ['length']
The height to the point(s).
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
@format_doc(geodetic_base_doc)
class BaseGeodeticRepresentation(BaseRepresentation):
"""Base geodetic representation."""
attr_classes = {"lon": Longitude, "lat": Latitude, "height": u.Quantity}
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
if "_ellipsoid" in cls.__dict__:
ELLIPSOIDS[cls._ellipsoid] = cls
def __init__(self, lon, lat=None, height=None, copy=True):
if height is None and not isinstance(lon, self.__class__):
height = 0 << u.m
super().__init__(lon, lat, height, copy=copy)
if not self.height.unit.is_equivalent(u.m):
raise u.UnitTypeError(
f"{self.__class__.__name__} requires height with units of length."
)
def to_cartesian(self):
"""
Converts WGS84 geodetic coordinates to 3D rectangular (geocentric)
cartesian coordinates.
"""
xyz = erfa.gd2gc(
getattr(erfa, self._ellipsoid), self.lon, self.lat, self.height
)
return CartesianRepresentation(xyz, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates (assumed geocentric) to
WGS84 geodetic coordinates.
"""
lon, lat, height = erfa.gc2gd(
getattr(erfa, cls._ellipsoid), cart.get_xyz(xyz_axis=-1)
)
return cls(lon, lat, height, copy=False)
@format_doc(geodetic_base_doc)
class WGS84GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in WGS84 3D geodetic coordinates."""
_ellipsoid = "WGS84"
@format_doc(geodetic_base_doc)
class WGS72GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in WGS72 3D geodetic coordinates."""
_ellipsoid = "WGS72"
@format_doc(geodetic_base_doc)
class GRS80GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in GRS80 3D geodetic coordinates."""
_ellipsoid = "GRS80"
|
53b1ab956a985f6ecf4e87bb5b53c86fbb96da677c932214b5738475d133e86e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the fundamental classes used for representing
coordinates in astropy.
"""
import functools
from collections import namedtuple
import numpy as np
from astropy import units as u
from astropy.utils import isiterable
from . import angle_formats as form
__all__ = ["Angle", "Latitude", "Longitude"]
# these are used by the `hms` and `dms` attributes
hms_tuple = namedtuple("hms_tuple", ("h", "m", "s"))
dms_tuple = namedtuple("dms_tuple", ("d", "m", "s"))
signed_dms_tuple = namedtuple("signed_dms_tuple", ("sign", "d", "m", "s"))
class Angle(u.SpecificTypeQuantity):
"""
One or more angular value(s) with units equivalent to radians or degrees.
An angle can be specified either as an array, scalar, tuple (see
below), string, `~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports a variety of formats.
The examples below illustrate common ways of initializing an `Angle`
object. First some imports::
>>> from astropy.coordinates import Angle
>>> from astropy import units as u
The angle values can now be provided::
>>> Angle('10.2345d')
<Angle 10.2345 deg>
>>> Angle(['10.2345d', '-20d'])
<Angle [ 10.2345, -20. ] deg>
>>> Angle('1:2:30.43 degrees')
<Angle 1.04178611 deg>
>>> Angle('1 2 0 hours')
<Angle 1.03333333 hourangle>
>>> Angle(np.arange(1, 8), unit=u.deg)
<Angle [1., 2., 3., 4., 5., 6., 7.] deg>
>>> Angle('1°2′3″')
<Angle 1.03416667 deg>
>>> Angle('1°2′3″N')
<Angle 1.03416667 deg>
>>> Angle('1d2m3.4s')
<Angle 1.03427778 deg>
>>> Angle('1d2m3.4sS')
<Angle -1.03427778 deg>
>>> Angle('-1h2m3s')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2m3sE')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2.5m')
<Angle -1.04166667 hourangle>
>>> Angle('-1h2.5mW')
<Angle 1.04166667 hourangle>
>>> Angle('-1:2.5', unit=u.deg)
<Angle -1.04166667 deg>
>>> Angle(10.2345 * u.deg)
<Angle 10.2345 deg>
>>> Angle(Angle(10.2345 * u.deg))
<Angle 10.2345 deg>
Parameters
----------
angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle`
The angle value. If a tuple, will be interpreted as ``(h, m,
s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it
will be interpreted following the rules described above.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
"""
_equivalent_unit = u.radian
_include_easy_conversion_members = True
def __new__(cls, angle, unit=None, dtype=np.inexact, copy=True, **kwargs):
if not isinstance(angle, u.Quantity):
if unit is not None:
unit = cls._convert_unit_to_angle_unit(u.Unit(unit))
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, unit)
elif isinstance(angle, str):
angle, angle_unit = form.parse_angle(angle, unit)
if angle_unit is None:
angle_unit = unit
if isinstance(angle, tuple):
if angle_unit == u.hourangle:
form._check_hour_range(angle[0])
form._check_minute_range(angle[1])
a = np.abs(angle[0]) + angle[1] / 60.0
if len(angle) == 3:
form._check_second_range(angle[2])
a += angle[2] / 3600.0
angle = np.copysign(a, angle[0])
if angle_unit is not unit:
# Possible conversion to `unit` will be done below.
angle = u.Quantity(angle, angle_unit, copy=False)
elif isiterable(angle) and not (
isinstance(angle, np.ndarray) and angle.dtype.kind not in "SUVO"
):
angle = [Angle(x, unit, copy=False) for x in angle]
return super().__new__(cls, angle, unit, dtype=dtype, copy=copy, **kwargs)
@staticmethod
def _tuple_to_float(angle, unit):
"""
Converts an angle represented as a 3-tuple or 2-tuple into a floating
point number in the given unit.
"""
# TODO: Numpy array of tuples?
if unit == u.hourangle:
return form.hms_to_hours(*angle)
elif unit == u.degree:
return form.dms_to_degrees(*angle)
else:
raise u.UnitsError(f"Can not parse '{angle}' as unit '{unit}'")
@staticmethod
def _convert_unit_to_angle_unit(unit):
return u.hourangle if unit == u.hour else unit
def _set_unit(self, unit):
super()._set_unit(self._convert_unit_to_angle_unit(unit))
@property
def hour(self):
"""
The angle's value in hours (read-only property).
"""
return self.hourangle
@property
def hms(self):
"""
The angle's value in hours, as a named tuple with ``(h, m, s)``
members. (This is a read-only property.)
"""
return hms_tuple(*form.hours_to_hms(self.hourangle))
@property
def dms(self):
"""
The angle's value in degrees, as a named tuple with ``(d, m, s)``
members. (This is a read-only property.)
"""
return dms_tuple(*form.degrees_to_dms(self.degree))
@property
def signed_dms(self):
"""
The angle's value in degrees, as a named tuple with ``(sign, d, m, s)``
members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of
the angle is given by ``sign``. (This is a read-only property.)
This is primarily intended for use with `dms` to generate string
representations of coordinates that are correct for negative angles.
"""
return signed_dms_tuple(
np.sign(self.degree), *form.degrees_to_dms(np.abs(self.degree))
)
def to_string(
self,
unit=None,
decimal=False,
sep="fromunit",
precision=None,
alwayssign=False,
pad=False,
fields=3,
format=None,
):
"""A string representation of the angle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. Must be an angular unit. If not
provided, the unit used to initialize the angle will be
used.
decimal : bool, optional
If `False`, the returned string will be in sexagesimal form
if possible (for units of degrees or hourangle). If `True`,
a decimal representation will be used. In that case, no unit
will be appended if ``format`` is not explicitly given.
sep : str, optional
The separator between numbers in a sexagesimal
representation. E.g., if it is ':', the result is
``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g.,
``sep='hms'`` would give the result ``'12h41m11.1241s'``, or
sep='-:' would yield ``'11-21:17.124'``. Alternatively, the
special string 'fromunit' means 'dms' if the unit is
degrees, or 'hms' if the unit is hours.
precision : int, optional
The level of decimal precision. If ``decimal`` is `True`,
this is the raw precision, otherwise it gives the
precision of the last place of the sexagesimal
representation (seconds). If `None`, or not provided, the
number of decimal places is determined by the value, and
will be between 0-8 decimal places as required.
alwayssign : bool, optional
If `True`, include the sign no matter what. If `False`,
only include the sign if it is negative.
pad : bool, optional
If `True`, include leading zeros when needed to ensure a
fixed number of characters for sexagesimal representation.
fields : int, optional
Specifies the number of fields to display when outputting
sexagesimal notation. For example:
- fields == 1: ``'5d'``
- fields == 2: ``'5d45m'``
- fields == 3: ``'5d45m32.5s'``
By default, all fields are displayed.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string which is the
same as with ``format='latex'`` for |Angle| instances
- 'unicode': Return a string containing non-ASCII unicode
characters, such as the degree symbol
Returns
-------
strrepr : str or array
A string representation of the angle. If the angle is an array, this
will be an array with a unicode dtype.
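        Examples
        --------
        For example (values chosen for illustration)::
            >>> from astropy.coordinates import Angle
            >>> import astropy.units as u
            >>> a = Angle(10.5, unit=u.deg)
            >>> a.to_string()
            '10d30m00s'
            >>> a.to_string(sep=':', precision=2)
            '10:30:00.00'
            >>> a.to_string(decimal=True)
            '10.5'
            >>> a.to_string(unit=u.hourangle)
            '0h42m00s'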
"""
if unit is None:
unit = self.unit
else:
unit = self._convert_unit_to_angle_unit(u.Unit(unit))
separators = {
"generic": {u.degree: "dms", u.hourangle: "hms"},
"latex": {
u.degree: [r"^\circ", r"{}^\prime", r"{}^{\prime\prime}"],
u.hourangle: [r"^{\mathrm{h}}", r"^{\mathrm{m}}", r"^{\mathrm{s}}"],
},
"unicode": {u.degree: "°′″", u.hourangle: "ʰᵐˢ"},
}
# 'latex_inline' provides no functionality beyond what 'latex' offers,
# but it should be implemented to avoid ValueErrors in user code.
separators["latex_inline"] = separators["latex"]
# Default separators are as for generic.
separators[None] = separators["generic"]
# Create an iterator so we can format each element of what
# might be an array.
        if not decimal and ((unit_is_deg := unit == u.degree) or unit == u.hourangle):
# Sexagesimal.
if sep == "fromunit":
if format not in separators:
raise ValueError(f"Unknown format '{format}'")
sep = separators[format][unit]
func = functools.partial(
form.degrees_to_string if unit_is_deg else form.hours_to_string,
precision=precision,
sep=sep,
pad=pad,
fields=fields,
)
else:
if sep != "fromunit":
raise ValueError(
f"'{unit}' can not be represented in sexagesimal notation"
)
func = ("{:g}" if precision is None else f"{{0:0.{precision}f}}").format
# Don't add unit by default for decimal.
if not (decimal and format is None):
unit_string = unit.to_string(format=format)
if format == "latex" or format == "latex_inline":
unit_string = unit_string[1:-1]
format_func = func
func = lambda x: format_func(x) + unit_string
def do_format(val):
# Check if value is not nan to avoid ValueErrors when turning it into
            # a sexagesimal string.
if not np.isnan(val):
s = func(float(val))
if alwayssign and not s.startswith("-"):
s = "+" + s
if format == "latex" or format == "latex_inline":
s = f"${s}$"
return s
s = f"{val}"
return s
values = self.to_value(unit)
format_ufunc = np.vectorize(do_format, otypes=["U"])
result = format_ufunc(values)
if result.ndim == 0:
result = result[()]
return result
def _wrap_at(self, wrap_angle):
"""
        Implementation that assumes ``wrap_angle`` is already validated
and that wrapping is inplace.
"""
# Convert the wrap angle and 360 degrees to the native unit of
# this Angle, then do all the math on raw Numpy arrays rather
# than Quantity objects for speed.
a360 = u.degree.to(self.unit, 360.0)
wrap_angle = wrap_angle.to_value(self.unit)
wrap_angle_floor = wrap_angle - a360
self_angle = self.view(np.ndarray)
# Do the wrapping, but only if any angles need to be wrapped
#
# Catch any invalid warnings from the floor division.
with np.errstate(invalid="ignore"):
wraps = (self_angle - wrap_angle_floor) // a360
valid = np.isfinite(wraps) & (wraps != 0)
if np.any(valid):
self_angle -= wraps * a360
# Rounding errors can cause problems.
self_angle[self_angle >= wrap_angle] -= a360
self_angle[self_angle < wrap_angle_floor] += a360
def wrap_at(self, wrap_angle, inplace=False):
"""
Wrap the `~astropy.coordinates.Angle` object at the given ``wrap_angle``.
This method forces all the angle values to be within a contiguous
360 degree range so that ``wrap_angle - 360d <= angle <
wrap_angle``. By default a new Angle object is returned, but if the
``inplace`` argument is `True` then the `~astropy.coordinates.Angle`
object is wrapped in place and nothing is returned.
For instance::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20.0, 150.0, 350.0] * u.deg)
>>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP
array([340., 150., 350.])
>>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP
>>> a.degree # doctest: +FLOAT_CMP
array([-20., 150., -10.])
Parameters
----------
wrap_angle : angle-like
Specifies a single value for the wrap angle. This can be any
object that can initialize an `~astropy.coordinates.Angle` object,
e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
inplace : bool
If `True` then wrap the object in place instead of returning
a new `~astropy.coordinates.Angle`
Returns
-------
out : Angle or None
If ``inplace is False`` (default), return new
`~astropy.coordinates.Angle` object with angles wrapped accordingly.
Otherwise wrap in place and return `None`.
"""
wrap_angle = Angle(wrap_angle, copy=False) # Convert to an Angle
if not inplace:
self = self.copy()
self._wrap_at(wrap_angle)
return None if inplace else self
def is_within_bounds(self, lower=None, upper=None):
"""
Check if all angle(s) satisfy ``lower <= angle < upper``
If ``lower`` is not specified (or `None`) then no lower bounds check is
performed. Likewise ``upper`` can be left unspecified. For example::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20, 150, 350] * u.deg)
>>> a.is_within_bounds('0d', '360d')
False
>>> a.is_within_bounds(None, '360d')
True
>>> a.is_within_bounds(-30 * u.deg, None)
True
Parameters
----------
lower : angle-like or None
Specifies lower bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
upper : angle-like or None
Specifies upper bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
Returns
-------
is_within_bounds : bool
`True` if all angles satisfy ``lower <= angle < upper``
"""
ok = True
if lower is not None:
ok &= np.all(Angle(lower) <= self)
if ok and upper is not None:
ok &= np.all(self < Angle(upper))
return bool(ok)
def _str_helper(self, format=None):
if self.isscalar:
return self.to_string(format=format)
def formatter(x):
return x.to_string(format=format)
return np.array2string(self, formatter={"all": formatter})
def __str__(self):
return self._str_helper()
def _repr_latex_(self):
return self._str_helper(format="latex")
def _no_angle_subclass(obj):
"""Return any Angle subclass objects as an Angle objects.
This is used to ensure that Latitude and Longitude change to Angle
objects when they are used in calculations (such as lon/2.)
"""
if isinstance(obj, tuple):
return tuple(_no_angle_subclass(_obj) for _obj in obj)
return obj.view(Angle) if isinstance(obj, (Latitude, Longitude)) else obj
class Latitude(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : array, list, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
or ``(d, m, s)`` depending on ``unit``. If a string, it will be
interpreted following the rules described for
:class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
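    Examples
    --------
    For example (illustrative values; the error message is the one raised by the
    range check)::
        >>> from astropy.coordinates import Latitude
        >>> import astropy.units as u
        >>> Latitude('-35d20m00s').degree  # doctest: +FLOAT_CMP
        -35.333333333333336
        >>> Latitude(91 * u.deg)
        Traceback (most recent call last):
        ...
        ValueError: Latitude angle(s) must be within -90 deg <= angle <= 90 deg, got 91.0 deg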
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super().__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
        If not given, the check is done on the object itself."""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
# For speed, compare using "is", which is not strictly guaranteed to hold,
# but if it doesn't we'll just convert correctly in the 'else' clause.
if angles.unit is u.deg:
limit = 90
elif angles.unit is u.rad:
limit = self.dtype.type(0.5 * np.pi)
else:
limit = u.degree.to(angles.unit, 90.0)
invalid_angles = np.any(angles.value < -limit) or np.any(angles.value > limit)
if invalid_angles:
raise ValueError(
"Latitude angle(s) must be within -90 deg <= angle <= 90 deg, "
f"got {angles.to(u.degree)}"
)
def __setitem__(self, item, value):
# Forbid assigning a Long to a Lat.
if isinstance(value, Longitude):
raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
# first check bounds
if value is not np.ma.masked:
self._validate_angles(value)
super().__setitem__(item, value)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
class LongitudeInfo(u.QuantityInfo):
_represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ("wrap_angle",)
class Longitude(Angle):
"""
Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
A ``Longitude`` object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
property. The ``wrap_angle`` specifies that all angle values
represented by the object will be in the range::
wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
The default ``wrap_angle`` is 360 deg. Setting ``wrap_angle=180 *
u.deg`` would instead result in values between -180 and +180 deg.
Setting the ``wrap_angle`` attribute of an existing ``Longitude``
object will result in re-wrapping the angle values in-place.
The input angle(s) can be specified either as an array, list,
scalar, tuple, string, :class:`~astropy.units.Quantity`
or another :class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : tuple or angle-like
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or
``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted
following the rules described for :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like ['angle'], optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
wrap_angle : angle-like or None, optional
Angle at which to wrap back to ``wrap_angle - 360 deg``.
If ``None`` (default), it will be taken to be 360 deg unless ``angle``
has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
in which case it will be taken from there.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
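    Examples
    --------
    For example (wrapping behaviour shown with illustrative values)::
        >>> from astropy.coordinates import Longitude
        >>> import astropy.units as u
        >>> Longitude([370, -10] * u.deg).degree  # doctest: +FLOAT_CMP
        array([ 10., 350.])
        >>> Longitude([370, -10] * u.deg, wrap_angle=180 * u.deg).degree  # doctest: +FLOAT_CMP
        array([ 10., -10.])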
"""
_wrap_angle = None
_default_wrap_angle = Angle(360 * u.deg)
info = LongitudeInfo()
def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
# Forbid creating a Long from a Lat.
if isinstance(angle, Latitude):
raise TypeError(
"A Longitude angle cannot be created from a Latitude angle."
)
self = super().__new__(cls, angle, unit=unit, **kwargs)
if wrap_angle is None:
wrap_angle = getattr(angle, "wrap_angle", self._default_wrap_angle)
self.wrap_angle = wrap_angle # angle-like b/c property setter
return self
def __setitem__(self, item, value):
# Forbid assigning a Lat to a Long.
if isinstance(value, Latitude):
raise TypeError("A Latitude angle cannot be assigned to a Longitude angle")
super().__setitem__(item, value)
self._wrap_at(self.wrap_angle)
@property
def wrap_angle(self):
return self._wrap_angle
@wrap_angle.setter
def wrap_angle(self, value):
self._wrap_angle = Angle(value, copy=False)
self._wrap_at(self.wrap_angle)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._wrap_angle = getattr(obj, "_wrap_angle", self._default_wrap_angle)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
|
2101a39c2b97a665eecf7044e7b7520f32b6e726191fd38af4f9096cb9e2d430 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Dependencies
import numpy as np
# Project
from astropy import units as u
from astropy.utils import ShapedLikeNDArray
__all__ = [
"Attribute",
"TimeAttribute",
"QuantityAttribute",
"EarthLocationAttribute",
"CoordinateAttribute",
"CartesianRepresentationAttribute",
"DifferentialAttribute",
]
class Attribute:
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeAttribute(default=_EQUINOX_B1950)
obstime = TimeAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
    and use the ``TimeAttribute`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
name = "<unbound>"
def __init__(self, default=None, secondary_attribute=""):
self.default = default
self.secondary_attribute = secondary_attribute
super().__init__()
def __set_name__(self, owner, name):
self.name = name
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value : object
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if instance is None:
out = self.default
else:
out = getattr(instance, "_" + self.name, self.default)
if out is None:
out = getattr(instance, self.secondary_attribute, self.default)
out, converted = self.convert_input(out)
if instance is not None:
# None if instance (frame) has no data!
instance_shape = getattr(instance, "shape", None)
if instance_shape is not None and (
getattr(out, "shape", ()) and out.shape != instance_shape
):
# If the shapes do not match, try broadcasting.
try:
if isinstance(out, ShapedLikeNDArray):
out = out._apply(
np.broadcast_to, shape=instance_shape, subok=True
)
else:
out = np.broadcast_to(out, instance_shape, subok=True)
except ValueError:
# raise more informative exception.
raise ValueError(
f"attribute {self.name} should be scalar or have shape"
f" {instance_shape}, but it has shape {out.shape} and could not"
" be broadcast."
)
converted = True
if converted:
setattr(instance, "_" + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError("Cannot set frame attribute")
class TimeAttribute(Attribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Convert input value to a Time object and validate by running through
the Time constructor. Also check that the input was a scalar.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(f"Invalid time input {self.name}={value!r}.") from err
converted = True
# Set attribute as read-only for arrays (not allowed by numpy
# for array scalars)
if out.shape:
out.writeable = False
return out, converted
class CartesianRepresentationAttribute(Attribute):
"""
A frame attribute that is a CartesianRepresentation with specified units.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit-like or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
"""
def __init__(self, default=None, secondary_attribute="", unit=None):
super().__init__(default, secondary_attribute)
self.unit = unit
def convert_input(self, value):
"""
Checks that the input is a CartesianRepresentation with the correct
unit, or the special value ``[0, 0, 0]``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out : object
The correctly-typed object.
converted : boolean
A boolean which indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if (
isinstance(value, list)
and len(value) == 3
and all(v == 0 for v in value)
and self.unit is not None
):
return CartesianRepresentation(np.zeros(3) * self.unit), True
else:
# is it a CartesianRepresentation with correct unit?
if hasattr(value, "xyz") and value.xyz.unit == self.unit:
return value, False
converted = True
# if it's a CartesianRepresentation, get the xyz Quantity
value = getattr(value, "xyz", value)
if not hasattr(value, "unit"):
raise TypeError(
f"tried to set a {self.__class__.__name__} with something that does"
" not have a unit."
)
value = value.to(self.unit)
# now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False)
return cartrep, converted
class QuantityAttribute(Attribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Can be `None`, which should be used for special cases in associated
frame transformations like "this quantity should be ignored" or similar.
Parameters
----------
default : number or `~astropy.units.Quantity` or None, optional
Default value for the attribute if the user does not supply one. If a
Quantity, it must be consistent with ``unit``, or if a value, ``unit``
cannot be None.
secondary_attribute : str, optional
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit-like or None, optional
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None, optional
If given, specifies the shape the attribute must be
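    Examples
    --------
    A minimal sketch of how this descriptor is typically used in a frame class
    definition (``MyFrame`` and ``obswl`` are purely illustrative names)::
        from astropy.coordinates import BaseCoordinateFrame
        import astropy.units as u
        class MyFrame(BaseCoordinateFrame):
            # The default carries the unit, so ``unit`` can be omitted here.
            obswl = QuantityAttribute(default=1 * u.micron)
    A value passed as ``MyFrame(obswl=5000 * u.AA)`` is validated and converted
    to a `~astropy.units.Quantity` in the attribute's unit.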
"""
def __init__(self, default=None, secondary_attribute="", unit=None, shape=None):
if default is None and unit is None:
raise ValueError(
"Either a default quantity value must be provided, or a unit must "
"be provided to define a QuantityAttribute."
)
if default is not None and unit is None:
unit = default.unit
self.unit = unit
self.shape = shape
default = self.convert_input(default)[0]
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if (
not hasattr(value, "unit")
and self.unit != u.dimensionless_unscaled
and np.any(value != 0)
):
raise TypeError(
"Tried to set a QuantityAttribute with "
"something that does not have a unit."
)
oldvalue = value
value = u.Quantity(oldvalue, self.unit, copy=False)
if self.shape is not None and value.shape != self.shape:
if value.shape == () and oldvalue == 0:
# Allow a single 0 to fill whatever shape is needed.
value = np.broadcast_to(value, self.shape, subok=True)
else:
raise ValueError(
f'The provided value has shape "{value.shape}", but '
f'should have shape "{self.shape}"'
)
converted = oldvalue is not value
return value, converted
class EarthLocationAttribute(Attribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
        Checks that the input is an `~astropy.coordinates.EarthLocation`, or an
        object that can be transformed to the `~astropy.coordinates.ITRS` frame
        (or the special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, EarthLocation):
return value, False
else:
# we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, "transform_to"):
raise ValueError(
f'"{value}" was passed into an EarthLocationAttribute, but it does'
' not have "transform_to" method'
)
itrsobj = value.transform_to(ITRS())
return itrsobj.earth_location, True
class CoordinateAttribute(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
`~astropy.coordinates.SkyCoord` or a low-level frame instance. If a
low-level frame instance is provided, it will always be upgraded to be a
`~astropy.coordinates.SkyCoord` to ensure consistent transformation
behavior. The coordinate object will always be returned as a low-level
frame instance when accessed.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
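    Examples
    --------
    A minimal, illustrative sketch (``MyFrame`` and ``origin`` are hypothetical
    names)::
        from astropy.coordinates import BaseCoordinateFrame, ICRS
        class MyFrame(BaseCoordinateFrame):
            origin = CoordinateAttribute(frame=ICRS, default=None)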
"""
def __init__(self, frame, default=None, secondary_attribute=""):
self._frame = frame
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
        Checks that the input is a SkyCoord or coordinate frame instance of the
        expected frame class (or the special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from .sky_coordinate import SkyCoord
if value is None:
return None, False
elif isinstance(value, SkyCoord) and isinstance(value.frame, self._frame):
return value.frame, True
elif isinstance(value, self._frame):
return value, False
else:
value = SkyCoord(value) # always make the value a SkyCoord
transformedobj = value.transform_to(self._frame)
return transformedobj.frame, True
class DifferentialAttribute(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
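    Examples
    --------
    A minimal, illustrative sketch (``MyFrame`` and ``v_obs`` are hypothetical
    names; restricting to `~astropy.coordinates.CartesianDifferential` is just
    an example)::
        from astropy.coordinates import BaseCoordinateFrame, CartesianDifferential
        import astropy.units as u
        class MyFrame(BaseCoordinateFrame):
            v_obs = DifferentialAttribute(
                default=CartesianDifferential([0, 0, 0] * u.km / u.s),
                allowed_classes=[CartesianDifferential],
            )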
"""
def __init__(self, default=None, allowed_classes=None, secondary_attribute=""):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
self.allowed_classes = BaseDifferential
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if not isinstance(value, self.allowed_classes):
if len(self.allowed_classes) == 1:
value = self.allowed_classes[0](value)
else:
raise TypeError(
"Tried to set a DifferentialAttribute with an unsupported"
f" Differential type {value.__class__}. Allowed classes are:"
f" {self.allowed_classes}"
)
return value, True
# do this here to prevent a series of complicated circular imports
from .earth import EarthLocation
from .representation import BaseDifferential, CartesianRepresentation
|
b4a66c9a8c3a0b55ef692945b809e3c9f87b5a966907d5d7a1bd5d2b1d929843 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import gc
import os
import pathlib
import pickle
import sys
from collections import OrderedDict
from io import StringIO
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import table
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.table import (
Column,
MaskedColumn,
QTable,
Table,
TableAttribute,
TableReplaceWarning,
)
from astropy.tests.helper import assert_follows_unicode_guidelines
from astropy.time import Time, TimeDelta
from astropy.utils.compat.optional_deps import HAS_PANDAS
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.tests.test_metadata import MetaBaseTest
from .conftest import MIXIN_COLS, MaskedTable
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
class SetupData:
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def a(self):
if self._column_type is not None:
if not hasattr(self, "_a"):
self._a = self._column_type(
[1, 2, 3], name="a", format="%d", meta={"aa": [0, 1, 2, 3, 4]}
)
return self._a
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, "_b"):
self._b = self._column_type(
[4, 5, 6], name="b", format="%d", meta={"aa": 1}
)
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, "_c"):
self._c = self._column_type([7, 8, 9], "c")
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, "_d"):
self._d = self._column_type([7, 8, 7], "d")
return self._d
@property
def obj(self):
if self._column_type is not None:
if not hasattr(self, "_obj"):
self._obj = self._column_type([1, "string", 3], "obj", dtype="O")
return self._obj
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, "_t"):
self._t = self._table_type([self.a, self.b])
return self._t
@pytest.mark.usefixtures("table_types")
class TestSetTableColumn(SetupData):
def test_set_row(self, table_types):
"""Set a row from a tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[1] = (20, 21)
assert t["a"][0] == 1
assert t["a"][1] == 20
assert t["a"][2] == 3
assert t["b"][0] == 4
assert t["b"][1] == 21
assert t["b"][2] == 6
def test_set_row_existing(self, table_types):
"""Set a row from another existing row"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[0] = t[1]
assert t[0][0] == 2
assert t[0][1] == 5
def test_set_row_fail_1(self, table_types):
"""Set a row from an incorrectly-sized or typed set of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = (20, 21, 22)
with pytest.raises(ValueError):
t[1] = 0
def test_set_row_fail_2(self, table_types):
"""Set a row from an incorrectly-typed tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = ("abc", "def")
def test_set_new_col_new_table(self, table_types):
"""Create a new column in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t["aa"] = self.a
# Test that the new column name is 'aa' and that the values match
assert np.all(t["aa"] == self.a)
assert t.colnames == ["aa"]
def test_set_new_col_new_table_quantity(self, table_types):
"""Create a new column (from a quantity) in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t["aa"] = np.array([1, 2, 3]) * u.m
assert np.all(t["aa"] == np.array([1, 2, 3]))
assert t["aa"].unit == u.m
t["bb"] = 3 * u.m
assert np.all(t["bb"] == 3)
assert t["bb"].unit == u.m
def test_set_new_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Add a column
t["bb"] = self.b
assert np.all(t["bb"] == self.b)
assert t.colnames == ["a", "bb"]
assert t["bb"].meta == self.b.meta
assert t["bb"].format == self.b.format
# Add another column
t["c"] = t["a"]
assert np.all(t["c"] == t["a"])
assert t.colnames == ["a", "bb", "c"]
assert t["c"].meta == t["a"].meta
assert t["c"].format == t["a"].format
# Add a multi-dimensional column
t["d"] = table_types.Column(np.arange(12).reshape(3, 2, 2))
assert t["d"].shape == (3, 2, 2)
assert t["d"][0, 0, 1] == 1
# Add column from a list
t["e"] = ["hello", "the", "world"]
assert np.all(t["e"] == np.array(["hello", "the", "world"]))
# Make sure setting existing column still works
t["e"] = ["world", "hello", "the"]
assert np.all(t["e"] == np.array(["world", "hello", "the"]))
# Add a column via broadcasting
t["f"] = 10
assert np.all(t["f"] == 10)
# Add a column from a Quantity
t["g"] = np.array([1, 2, 3]) * u.m
assert np.all(t["g"].data == np.array([1, 2, 3]))
assert t["g"].unit == u.m
# Add a column from a (scalar) Quantity
t["g"] = 3 * u.m
assert np.all(t["g"].data == 3)
assert t["g"].unit == u.m
def test_set_new_unmasked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.Column(name="b", data=[1, 2, 3]) # unmasked
t["b"] = b
assert np.all(t["b"] == b)
def test_set_new_masked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.MaskedColumn(name="b", data=[1, 2, 3]) # masked
t["b"] = b
assert np.all(t["b"] == b)
def test_set_new_col_existing_table_fail(self, table_types):
"""Generate failure when creating a new column using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Wrong size
with pytest.raises(ValueError):
t["b"] = [1, 2]
@pytest.mark.usefixtures("table_types")
class TestEmptyData:
def test_1(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", dtype=int, length=100))
assert len(t["a"]) == 100
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", dtype=int, shape=(3,), length=100))
assert len(t["a"]) == 100
def test_3(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name="a", dtype=int))
assert len(t["a"]) == 0
def test_4(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name="a", dtype=int, shape=(3, 4)))
assert len(t["a"]) == 0
def test_5(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a")) # dtype is not specified
assert len(t["a"]) == 0
def test_scalar(self, table_types):
"""Test related to #3811 where setting empty tables to scalar values
should raise an error instead of having an error raised when accessing
the table."""
t = table_types.Table()
with pytest.raises(
TypeError, match="Empty table cannot have column set to scalar value"
):
t.add_column(0)
def test_add_via_setitem_and_slice(self, table_types):
"""Test related to #3023 where a MaskedColumn is created with name=None
and then gets changed to name='a'. After PR #2790 this test fails
without the #3023 fix."""
t = table_types.Table()
t["a"] = table_types.Column([1, 2, 3])
t2 = t[:]
assert t2.colnames == t.colnames
@pytest.mark.usefixtures("table_types")
class TestNewFromColumns:
def test_simple(self, table_types):
cols = [
table_types.Column(name="a", data=[1, 2, 3]),
table_types.Column(name="b", data=[4, 5, 6], dtype=np.float32),
]
t = table_types.Table(cols)
assert np.all(t["a"].data == np.array([1, 2, 3]))
assert np.all(t["b"].data == np.array([4, 5, 6], dtype=np.float32))
assert type(t["b"][1]) is np.float32
def test_from_np_array(self, table_types):
cols = [
table_types.Column(
name="a", data=np.array([1, 2, 3], dtype=np.int64), dtype=np.float64
),
table_types.Column(name="b", data=np.array([4, 5, 6], dtype=np.float32)),
]
t = table_types.Table(cols)
assert np.all(t["a"] == np.array([1, 2, 3], dtype=np.float64))
assert np.all(t["b"] == np.array([4, 5, 6], dtype=np.float32))
assert type(t["a"][1]) is np.float64
assert type(t["b"][1]) is np.float32
def test_size_mismatch(self, table_types):
cols = [
table_types.Column(name="a", data=[1, 2, 3]),
table_types.Column(name="b", data=[4, 5, 6, 7]),
]
with pytest.raises(ValueError):
table_types.Table(cols)
def test_name_none(self, table_types):
"""Column with name=None can init a table whether or not names are supplied"""
c = table_types.Column(data=[1, 2], name="c")
d = table_types.Column(data=[3, 4])
t = table_types.Table([c, d], names=(None, "d"))
assert t.colnames == ["c", "d"]
t = table_types.Table([c, d])
assert t.colnames == ["c", "col1"]
@pytest.mark.usefixtures("table_types")
class TestReverse:
def test_reverse(self, table_types):
t = table_types.Table(
[
[1, 2, 3],
["a", "b", "cc"],
]
)
t.reverse()
assert np.all(t["col0"] == np.array([3, 2, 1]))
assert np.all(t["col1"] == np.array(["cc", "b", "a"]))
t2 = table_types.Table(t, copy=False)
assert np.all(t2["col0"] == np.array([3, 2, 1]))
assert np.all(t2["col1"] == np.array(["cc", "b", "a"]))
t2 = table_types.Table(t, copy=True)
assert np.all(t2["col0"] == np.array([3, 2, 1]))
assert np.all(t2["col1"] == np.array(["cc", "b", "a"]))
t2.sort("col0")
assert np.all(t2["col0"] == np.array([1, 2, 3]))
assert np.all(t2["col1"] == np.array(["a", "b", "cc"]))
def test_reverse_big(self, table_types):
x = np.arange(10000)
y = x + 1
t = table_types.Table([x, y], names=("x", "y"))
t.reverse()
assert np.all(t["x"] == x[::-1])
assert np.all(t["y"] == y[::-1])
def test_reverse_mixin(self):
"""Test reverse for a mixin with no item assignment, fix for #9836"""
sc = SkyCoord([1, 2], [3, 4], unit="deg")
t = Table([[2, 1], sc], names=["a", "sc"])
t.reverse()
assert np.all(t["a"] == [1, 2])
assert np.allclose(t["sc"].ra.to_value("deg"), [2, 1])
@pytest.mark.usefixtures("table_types")
class TestRound:
def test_round_int(self, table_types):
t = table_types.Table(
[
["a", "b", "c"],
[1.11, 2.3, 3.0],
[1.123456, 2.9876, 3.901],
]
)
t.round()
assert np.all(t["col0"] == ["a", "b", "c"])
assert np.all(t["col1"] == [1.0, 2.0, 3.0])
assert np.all(t["col2"] == [1.0, 3.0, 4.0])
def test_round_dict(self, table_types):
t = table_types.Table(
[
["a", "b", "c"],
[1.5, 2.5, 3.0111],
[1.123456, 2.9876, 3.901],
]
)
t.round({"col1": 0, "col2": 3})
assert np.all(t["col0"] == ["a", "b", "c"])
assert np.all(t["col1"] == [2.0, 2.0, 3.0])
assert np.all(t["col2"] == [1.123, 2.988, 3.901])
def test_round_invalid(self, table_types):
t = table_types.Table([[1, 2, 3]])
with pytest.raises(
ValueError, match="'decimals' argument must be an int or a dict"
):
t.round(0.5)
def test_round_kind(self, table_types):
for typecode in "bBhHiIlLqQpPefdgFDG": # AllInteger, AllFloat
arr = np.array([4, 16], dtype=typecode)
t = Table([arr])
col0 = t["col0"]
t.round(decimals=-1) # Round to nearest 10
assert np.all(t["col0"] == [0, 20])
assert t["col0"] is col0
@pytest.mark.usefixtures("table_types")
class TestColumnAccess:
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t["a"]
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[1, 2, 3]))
assert np.all(t["a"] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t["b"] # column does not exist
def test_itercols(self, table_types):
names = ["a", "b", "c"]
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures("table_types")
class TestAddLength(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(
table_types.Column(name="b", data=[4, 5, 6, 7])
) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name="b", data=[4, 5])) # data too short
@pytest.mark.usefixtures("table_types")
class TestAddPosition(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 0)
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 1)
def test_3(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, -1)
def test_5(self, table_types):
self._setup(table_types)
t = table_types.Table()
with pytest.raises(ValueError):
t.index_column("b")
def test_6(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b)
assert t.colnames == ["a", "b"]
def test_7(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column("a"))
assert t.colnames == ["b", "a"]
def test_8(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column("a") + 1)
assert t.colnames == ["a", "b"]
def test_9(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b, t.index_column("a") + 1)
t.add_column(self.c, t.index_column("b"))
assert t.colnames == ["a", "c", "b"]
def test_10(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
ia = t.index_column("a")
t.add_column(self.b, ia + 1)
t.add_column(self.c, ia)
assert t.colnames == ["c", "a", "b"]
@pytest.mark.usefixtures("table_types")
class TestAddName(SetupData):
def test_override_name(self, table_types):
self._setup(table_types)
t = table_types.Table()
# Check that we can override the name of the input column in the Table
t.add_column(self.a, name="b")
t.add_column(self.b, name="a")
assert t.colnames == ["b", "a"]
# Check that we did not change the name of the input column
assert self.a.info.name == "a"
assert self.b.info.name == "b"
# Now test with an input column from another table
t2 = table_types.Table()
t2.add_column(t["a"], name="c")
assert t2.colnames == ["c"]
# Check that we did not change the name of the input column
assert t.colnames == ["b", "a"]
# Check that we can give a name if none was present
col = table_types.Column([1, 2, 3])
t.add_column(col, name="c")
assert t.colnames == ["b", "a", "c"]
def test_default_name(self, table_types):
t = table_types.Table()
col = table_types.Column([1, 2, 3])
t.add_column(col)
assert t.colnames == ["col0"]
@pytest.mark.usefixtures("table_types")
class TestInitFromTable(SetupData):
def test_from_table_cols(self, table_types):
"""Ensure that using cols from an existing table gives
a clean copy.
"""
self._setup(table_types)
t = self.t
cols = t.columns
# Construct Table with cols via Table._new_from_cols
t2a = table_types.Table([cols["a"], cols["b"], self.c])
# Construct with add_column
t2b = table_types.Table()
t2b.add_column(cols["a"])
t2b.add_column(cols["b"])
t2b.add_column(self.c)
t["a"][1] = 20
t["b"][1] = 21
for t2 in [t2a, t2b]:
t2["a"][2] = 10
t2["b"][2] = 11
t2["c"][2] = 12
t2.columns["a"].meta["aa"][3] = 10
assert np.all(t["a"] == np.array([1, 20, 3]))
assert np.all(t["b"] == np.array([4, 21, 6]))
assert np.all(t2["a"] == np.array([1, 2, 10]))
assert np.all(t2["b"] == np.array([4, 5, 11]))
assert np.all(t2["c"] == np.array([7, 8, 12]))
assert t2["a"].name == "a"
assert t2.columns["a"].meta["aa"][3] == 10
assert t.columns["a"].meta["aa"][3] == 3
@pytest.mark.usefixtures("table_types")
class TestAddColumns(SetupData):
def test_add_columns1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c])
assert t.colnames == ["a", "b", "c"]
def test_add_columns2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d])
assert t.colnames == ["a", "b", "c", "d"]
assert np.all(t["c"] == np.array([7, 8, 9]))
def test_add_columns3(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[1, 0])
assert t.colnames == ["d", "a", "c", "b"]
def test_add_columns4(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[0, 0])
assert t.colnames == ["c", "d", "a", "b"]
def test_add_columns5(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[2, 2])
assert t.colnames == ["a", "b", "c", "d"]
def test_add_columns6(self, table_types):
"""Check that we can override column names."""
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c], names=["b", "c", "a"])
assert t.colnames == ["b", "c", "a"]
def test_add_columns7(self, table_types):
"""Check that default names are used when appropriate."""
t = table_types.Table()
col0 = table_types.Column([1, 2, 3])
col1 = table_types.Column([4, 5, 3])
t.add_columns([col0, col1])
assert t.colnames == ["col0", "col1"]
def test_add_duplicate_column(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(table_types.Column(name="a", data=[0, 1, 2]))
t.add_column(
table_types.Column(name="a", data=[0, 1, 2]), rename_duplicate=True
)
t.add_column(self.b)
t.add_column(self.c)
assert t.colnames == ["a", "a_1", "b", "c"]
t.add_column(
table_types.Column(name="a", data=[0, 1, 2]), rename_duplicate=True
)
assert t.colnames == ["a", "a_1", "b", "c", "a_2"]
# test adding column from a separate Table
t1 = table_types.Table()
t1.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(t1["a"])
t.add_column(t1["a"], rename_duplicate=True)
t1["a"][0] = 100 # Change original column
assert t.colnames == ["a", "a_1", "b", "c", "a_2", "a_3"]
assert t1.colnames == ["a"]
# Check new column didn't change (since name conflict forced a copy)
assert t["a_3"][0] == self.a[0]
# Check that rename_duplicate=True is ok if there are no duplicates
t.add_column(
table_types.Column(name="q", data=[0, 1, 2]), rename_duplicate=True
)
assert t.colnames == ["a", "a_1", "b", "c", "a_2", "a_3", "q"]
def test_add_duplicate_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
with pytest.raises(ValueError):
t.add_columns(
[
table_types.Column(name="a", data=[0, 1, 2]),
table_types.Column(name="b", data=[0, 1, 2]),
]
)
t.add_columns(
[
table_types.Column(name="a", data=[0, 1, 2]),
table_types.Column(name="b", data=[0, 1, 2]),
],
rename_duplicate=True,
)
t.add_column(self.d)
assert t.colnames == ["a", "b", "c", "a_1", "b_1", "d"]
@pytest.mark.usefixtures("table_types")
class TestAddRow(SetupData):
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, "_b"):
self._b = self._column_type(name="b", data=[4.0, 5.1, 6.2])
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, "_c"):
self._c = self._column_type(name="c", data=["7", "8", "9"])
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, "_d"):
self._d = self._column_type(name="d", data=[[1, 2], [3, 4], [5, 6]])
return self._d
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, "_t"):
self._t = self._table_type([self.a, self.b, self.c])
return self._t
def test_add_none_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=("a", "b", "c"), dtype=("(2,)i", "S4", "O"))
t.add_row()
assert np.all(t["a"][0] == [0, 0])
assert t["b"][0] == ""
assert t["c"][0] == 0
t.add_row()
assert np.all(t["a"][1] == [0, 0])
assert t["b"][1] == ""
assert t["c"][1] == 0
def test_add_stuff_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=("a", "b", "obj"), dtype=("(2,)i", "S8", "O"))
t.add_row([[1, 2], "hello", "world"])
assert np.all(t["a"][0] == [1, 2])
assert t["b"][0] == "hello"
assert t["obj"][0] == "world"
# Make sure it is not repeating last row but instead
# adding zeros (as documented)
t.add_row()
assert np.all(t["a"][1] == [0, 0])
assert t["b"][1] == ""
assert t["obj"][1] == 0
def test_add_table_row(self, table_types):
self._setup(table_types)
t = self.t
t["d"] = self.d
t2 = table_types.Table([self.a, self.b, self.c, self.d])
t.add_row(t2[0])
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 1]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t["c"] == np.array(["7", "8", "9", "7"]))
assert np.all(t["d"] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
def test_add_table_row_obj(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.obj])
t.add_row([1, 4.0, [10]])
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 1]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t["obj"] == np.array([1, "string", 3, [10]], dtype="O"))
def test_add_qtable_row_multidimensional(self):
q = [[1, 2], [3, 4]] * u.m
qt = table.QTable([q])
qt.add_row(([5, 6] * u.km,))
assert np.all(qt["col0"] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
def test_add_with_tuple(self, table_types):
self._setup(table_types)
t = self.t
t.add_row((4, 7.2, "1"))
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 4]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t["c"] == np.array(["7", "8", "9", "1"]))
def test_add_with_list(self, table_types):
self._setup(table_types)
t = self.t
t.add_row([4, 7.2, "10"])
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 4]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t["c"] == np.array(["7", "8", "9", "10"]))
def test_add_with_dict(self, table_types):
self._setup(table_types)
t = self.t
t.add_row({"a": 4, "b": 7.2})
assert len(t) == 4
assert np.all(t["a"] == np.array([1, 2, 3, 4]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 7.2]))
if t.masked:
assert np.all(t["c"] == np.array(["7", "8", "9", "7"]))
else:
assert np.all(t["c"] == np.array(["7", "8", "9", ""]))
def test_add_with_none(self, table_types):
self._setup(table_types)
t = self.t
t.add_row()
assert len(t) == 4
assert np.all(t["a"].data == np.array([1, 2, 3, 0]))
assert np.allclose(t["b"], np.array([4.0, 5.1, 6.2, 0.0]))
assert np.all(t["c"].data == np.array(["7", "8", "9", ""]))
def test_add_missing_column(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row({"bad_column": 1})
def test_wrong_size_tuple(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row((1, 2))
def test_wrong_vals_type(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(TypeError):
t.add_row(1)
def test_add_row_failures(self, table_types):
self._setup(table_types)
t = self.t
t_copy = table_types.Table(t, copy=True)
# Wrong number of columns
try:
t.add_row([1, 2, 3, 4])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
# Wrong data type
try:
t.add_row(["one", 2, 3])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
def test_insert_table_row(self, table_types):
"""
Light testing of Table.insert_row() method. The deep testing is done via
the add_row() tests which calls insert_row(index=len(self), ...), so
here just test that the added index parameter is handled correctly.
"""
self._setup(table_types)
row = (10, 40.0, "x", [10, 20])
for index in range(-3, 4):
indices = np.insert(np.arange(3), index, 3)
t = table_types.Table([self.a, self.b, self.c, self.d])
t2 = t.copy()
t.add_row(row) # By now we know this works
t2.insert_row(index, row)
for name in t.colnames:
if t[name].dtype.kind == "f":
assert np.allclose(t[name][indices], t2[name])
else:
assert np.all(t[name][indices] == t2[name])
for index in (-4, 4):
t = table_types.Table([self.a, self.b, self.c, self.d])
with pytest.raises(IndexError):
t.insert_row(index, row)
@pytest.mark.usefixtures("table_types")
class TestTableColumn(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns["a"]
a[2] = 10
assert t["a"][2] == 10
@pytest.mark.usefixtures("table_types")
class TestArrayColumns(SetupData):
def test_1d(self, table_types):
self._setup(table_types)
b = table_types.Column(name="b", dtype=int, shape=(2,), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t["b"].shape == (3, 2)
assert t["b"][0].shape == (2,)
def test_2d(self, table_types):
self._setup(table_types)
b = table_types.Column(name="b", dtype=int, shape=(2, 4), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t["b"].shape == (3, 2, 4)
assert t["b"][0].shape == (2, 4)
def test_3d(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
b = table_types.Column(name="b", dtype=int, shape=(2, 4, 6), length=3)
t.add_column(b)
assert t["b"].shape == (3, 2, 4, 6)
assert t["b"][0].shape == (2, 4, 6)
@pytest.mark.usefixtures("table_types")
class TestRemove(SetupData):
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, "_t"):
self._t = self._table_type([self.a])
return self._t
@property
def t2(self):
if self._table_type is not None:
if not hasattr(self, "_t2"):
self._t2 = self._table_type([self.a, self.b, self.c])
return self._t2
def test_1(self, table_types):
self._setup(table_types)
self.t.remove_columns("a")
assert self.t.colnames == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray) # noqa: E711
assert (self.t == None).size == 0 # noqa: E711
def test_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_columns("a")
assert self.t.colnames == ["b"]
assert self.t.dtype.names == ("b",)
assert np.all(self.t["b"] == np.array([4, 5, 6]))
def test_3(self, table_types):
"""Check remove_columns works for a single column with a name of
more than one character. Regression test against #2699"""
self._setup(table_types)
self.t["new_column"] = self.t["a"]
assert "new_column" in self.t.columns.keys()
self.t.remove_columns("new_column")
assert "new_column" not in self.t.columns.keys()
def test_remove_nonexistent_row(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
self.t.remove_row(4)
def test_remove_row_0(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(0)
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["b"] == np.array([5, 6]))
def test_remove_row_1(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(1)
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["a"] == np.array([1, 3]))
def test_remove_row_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(2)
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([7, 8]))
def test_remove_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows(slice(0, 2, 1))
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([9]))
def test_remove_row_list(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows([0, 2])
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([8]))
def test_remove_row_preserves_meta(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_rows([0, 2])
assert self.t["a"].meta == {"aa": [0, 1, 2, 3, 4]}
assert self.t.dtype == np.dtype([("a", "int"), ("b", "int")])
def test_delitem_row(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[1]
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["a"] == np.array([1, 3]))
@pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
def test_delitem_row_list(self, table_types, idx):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[idx]
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([8]))
def test_delitem_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[0:2]
assert self.t.colnames == ["a", "b", "c"]
assert np.all(self.t["c"] == np.array([9]))
def test_delitem_row_fail(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[4]
def test_delitem_row_float(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[1.0]
def test_delitem1(self, table_types):
self._setup(table_types)
del self.t["a"]
assert self.t.colnames == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray) # noqa: E711
assert (self.t == None).size == 0 # noqa: E711
def test_delitem2(self, table_types):
self._setup(table_types)
del self.t2["b"]
assert self.t2.colnames == ["a", "c"]
def test_delitems(self, table_types):
self._setup(table_types)
del self.t2["a", "b"]
assert self.t2.colnames == ["c"]
def test_delitem_fail(self, table_types):
self._setup(table_types)
with pytest.raises(KeyError):
del self.t["d"]
@pytest.mark.usefixtures("table_types")
class TestKeep(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns([])
assert t.colnames == []
assert t.as_array().size == 0
# Regression test for gh-8640
assert not t
assert isinstance(t == None, np.ndarray) # noqa: E711
assert (t == None).size == 0 # noqa: E711
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns("b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
assert np.all(t["b"] == np.array([4, 5, 6]))
@pytest.mark.usefixtures("table_types")
class TestRename(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.rename_column("a", "b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
assert np.all(t["b"] == np.array([1, 2, 3]))
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.rename_column("a", "c")
t.rename_column("b", "a")
assert t.colnames == ["c", "a"]
assert t.dtype.names == ("c", "a")
if t.masked:
assert t.mask.dtype.names == ("c", "a")
assert np.all(t["c"] == np.array([1, 2, 3]))
assert np.all(t["a"] == np.array([4, 5, 6]))
def test_rename_by_attr(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t["a"].name = "c"
t["b"].name = "a"
assert t.colnames == ["c", "a"]
assert t.dtype.names == ("c", "a")
assert np.all(t["c"] == np.array([1, 2, 3]))
assert np.all(t["a"] == np.array([4, 5, 6]))
def test_rename_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
t.rename_columns(("a", "b", "c"), ("aa", "bb", "cc"))
assert t.colnames == ["aa", "bb", "cc"]
t.rename_columns(["bb", "cc"], ["b", "c"])
assert t.colnames == ["aa", "b", "c"]
with pytest.raises(TypeError):
t.rename_columns("aa", ["a"])
with pytest.raises(ValueError):
t.rename_columns(["a"], ["b", "c"])
@pytest.mark.usefixtures("table_types")
class TestSort:
def test_single(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4]))
t.add_column(
table_types.Column(
name="c",
data=[
(1, 2),
(3, 4),
(4, 5),
],
)
)
assert np.all(t["a"] == np.array([2, 1, 3]))
assert np.all(t["b"] == np.array([6, 5, 4]))
t.sort("a")
assert np.all(t["a"] == np.array([1, 2, 3]))
assert np.all(t["b"] == np.array([5, 6, 4]))
assert np.all(
t["c"]
== np.array(
[
[3, 4],
[1, 2],
[4, 5],
]
)
)
t.sort("b")
assert np.all(t["a"] == np.array([3, 1, 2]))
assert np.all(t["b"] == np.array([4, 5, 6]))
assert np.all(
t["c"]
== np.array(
[
[4, 5],
[3, 4],
[1, 2],
]
)
)
@pytest.mark.parametrize("create_index", [False, True])
def test_single_reverse(self, table_types, create_index):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4]))
t.add_column(table_types.Column(name="c", data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t["a"] == np.array([2, 1, 3]))
assert np.all(t["b"] == np.array([6, 5, 4]))
t.sort("a", reverse=True)
assert np.all(t["a"] == np.array([3, 2, 1]))
assert np.all(t["b"] == np.array([4, 6, 5]))
assert np.all(t["c"] == np.array([[4, 5], [1, 2], [3, 4]]))
t.sort("b", reverse=True)
assert np.all(t["a"] == np.array([2, 1, 3]))
assert np.all(t["b"] == np.array([6, 5, 4]))
assert np.all(t["c"] == np.array([[1, 2], [3, 4], [4, 5]]))
def test_single_big(self, table_types):
"""Sort a big-ish table with a non-trivial sort order"""
x = np.arange(10000)
y = np.sin(x)
t = table_types.Table([x, y], names=("x", "y"))
t.sort("y")
idx = np.argsort(y)
assert np.all(t["x"] == x[idx])
assert np.all(t["y"] == y[idx])
@pytest.mark.parametrize("reverse", [True, False])
def test_empty_reverse(self, table_types, reverse):
t = table_types.Table([[], []], dtype=["f4", "U1"])
t.sort("col1", reverse=reverse)
def test_multiple(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
assert np.all(t["a"] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t["b"] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(["a", "b"])
assert np.all(t["a"] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t["b"] == np.array([4, 5, 3, 6, 4, 5]))
t.sort(["b", "a"])
assert np.all(t["a"] == np.array([2, 1, 3, 1, 3, 2]))
assert np.all(t["b"] == np.array([3, 4, 4, 5, 5, 6]))
t.sort(("a", "b"))
assert np.all(t["a"] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t["b"] == np.array([4, 5, 3, 6, 4, 5]))
def test_multiple_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
assert np.all(t["a"] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t["b"] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(["a", "b"], reverse=True)
assert np.all(t["a"] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t["b"] == np.array([5, 4, 6, 3, 5, 4]))
t.sort(["b", "a"], reverse=True)
assert np.all(t["a"] == np.array([2, 3, 1, 3, 1, 2]))
assert np.all(t["b"] == np.array([6, 5, 5, 4, 4, 3]))
t.sort(("a", "b"), reverse=True)
assert np.all(t["a"] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t["b"] == np.array([5, 4, 6, 3, 5, 4]))
def test_multiple_with_bytes(self, table_types):
t = table_types.Table()
t.add_column(
table_types.Column(name="firstname", data=[b"Max", b"Jo", b"John"])
)
t.add_column(
table_types.Column(name="name", data=[b"Miller", b"Miller", b"Jackson"])
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
t.sort(["name", "firstname"])
assert np.all([t["firstname"] == np.array([b"John", b"Jo", b"Max"])])
assert np.all([t["name"] == np.array([b"Jackson", b"Miller", b"Miller"])])
assert np.all([t["tel"] == np.array([19, 15, 12])])
def test_multiple_with_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(
table_types.Column(
name="firstname", data=[str(x) for x in ["Max", "Jo", "John"]]
)
)
t.add_column(
table_types.Column(
name="name", data=[str(x) for x in ["Miller", "Miller", "Jackson"]]
)
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
t.sort(["name", "firstname"])
assert np.all(
[t["firstname"] == np.array([str(x) for x in ["John", "Jo", "Max"]])]
)
assert np.all(
[t["name"] == np.array([str(x) for x in ["Jackson", "Miller", "Miller"]])]
)
assert np.all([t["tel"] == np.array([19, 15, 12])])
def test_argsort(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort() == t.as_array().argsort())
i0 = t.argsort("a")
i1 = t.as_array().argsort(order=["a"])
assert np.all(t["a"][i0] == t["a"][i1])
i0 = t.argsort(["a", "b"])
i1 = t.as_array().argsort(order=["a", "b"])
assert np.all(t["a"][i0] == t["a"][i1])
assert np.all(t["b"][i0] == t["b"][i1])
@pytest.mark.parametrize("add_index", [False, True])
def test_argsort_reverse(self, table_types, add_index):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name="b", data=[6, 5, 4, 3, 5, 4]))
if add_index:
t.add_index("a")
assert np.all(t.argsort(reverse=True) == np.array([4, 2, 0, 3, 1, 5]))
i0 = t.argsort("a", reverse=True)
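# Expected indices: the stable ascending argsort of 'a' ([1, 5, 0, 3, 2, 4]) reversed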
i1 = np.array([4, 2, 3, 0, 5, 1])
assert np.all(t["a"][i0] == t["a"][i1])
i0 = t.argsort(["a", "b"], reverse=True)
i1 = np.array([4, 2, 0, 3, 1, 5])
assert np.all(t["a"][i0] == t["a"][i1])
assert np.all(t["b"][i0] == t["b"][i1])
def test_argsort_bytes(self, table_types):
t = table_types.Table()
t.add_column(
table_types.Column(name="firstname", data=[b"Max", b"Jo", b"John"])
)
t.add_column(
table_types.Column(name="name", data=[b"Miller", b"Miller", b"Jackson"])
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
assert np.all(t.argsort(["name", "firstname"]) == np.array([2, 1, 0]))
def test_argsort_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(
table_types.Column(
name="firstname", data=[str(x) for x in ["Max", "Jo", "John"]]
)
)
t.add_column(
table_types.Column(
name="name", data=[str(x) for x in ["Miller", "Miller", "Jackson"]]
)
)
t.add_column(table_types.Column(name="tel", data=[12, 15, 19]))
assert np.all(t.argsort(["name", "firstname"]) == np.array([2, 1, 0]))
def test_rebuild_column_view_then_rename(self, table_types):
"""
Issue #2039 where renaming fails after any method that calls
_rebuild_table_column_view (this includes sort and add_row).
"""
t = table_types.Table([[1]], names=("a",))
assert t.colnames == ["a"]
assert t.dtype.names == ("a",)
t.add_row((2,))
assert t.colnames == ["a"]
assert t.dtype.names == ("a",)
t.rename_column("a", "b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
t.sort("b")
assert t.colnames == ["b"]
assert t.dtype.names == ("b",)
t.rename_column("b", "c")
assert t.colnames == ["c"]
assert t.dtype.names == ("c",)
@pytest.mark.parametrize("kwargs", [{}, {"kind": "stable"}, {"kind": "quicksort"}])
def test_sort_kind(kwargs):
t = Table()
t["a"] = [2, 1, 3, 2, 3, 1]
t["b"] = [6, 5, 4, 3, 5, 4]
t_struct = t.as_array()
# Since sort calls Table.argsort this covers `kind` for both methods
t.sort(["a", "b"], **kwargs)
assert np.all(t.as_array() == np.sort(t_struct, **kwargs))
@pytest.mark.usefixtures("table_types")
class TestIterator:
def test_iterator(self, table_types):
d = np.array(
[
(2, 1),
(3, 6),
(4, 5),
],
dtype=[("a", "i4"), ("b", "i4")],
)
t = table_types.Table(d)
if t.masked:
with pytest.raises(ValueError):
t[0] == d[0]
else:
for row, np_row in zip(t, d):
assert np.all(row == np_row)
@pytest.mark.usefixtures("table_types")
class TestSetMeta:
def test_set_meta(self, table_types):
d = table_types.Table(names=("a", "b"))
d.meta["a"] = 1
d.meta["b"] = 1
d.meta["c"] = 1
d.meta["d"] = 1
assert list(d.meta.keys()) == ["a", "b", "c", "d"]
@pytest.mark.usefixtures("table_types")
class TestConvertNumpyArray:
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[("c", "i8"), ("d", "i8")])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = (">", "<")
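# sys.byteorder is "little" or "big"; the boolean index picks "<" on little-endian machines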
native_order = byte_orders[sys.byteorder == "little"]
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name="a", dtype=order + "f8")
t = table_types.Table([col])
arr = t.as_array()
assert arr["a"].dtype.byteorder in (native_order, "=")
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr["a"].dtype.byteorder in (order, "=")
else:
assert arr["a"].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = (">", "<")[sys.byteorder != "little"]
filename = get_pkg_data_filename("data/tb.fits", "astropy.io.fits.tests")
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename, character_as_bytes=True) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert data[colname].dtype.byteorder == arr2[colname].dtype.byteorder
def test_convert_numpy_object_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
# Single table
np_d = np.array(d, dtype=object)
assert isinstance(np_d, np.ndarray)
assert np_d[()] is d
def test_convert_list_numpy_object_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
ds = [d, d, d]
np_ds = np.array(ds, dtype=object)
assert all([isinstance(t, table_types.Table) for t in np_ds])
assert all([np.array_equal(t, d) for t in np_ds])
def _assert_copies(t, t2, deep=True):
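# Helper: check that t2 has the same content as t; with deep=True the columns must not share memory.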
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=["x", "y"])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table(
[[1, 2, 3], [2, 3, 4]], names=["x", "y"], masked=True, meta={"name": "test"}
)
t["x"].mask == [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=["x", "y"])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2
with pytest.raises(TypeError):
t < 1.1
with pytest.raises(TypeError):
t >= 5.5
with pytest.raises(TypeError):
t <= -1.1
def test_values_equal_part1():
col1 = [1, 2]
col2 = [1.0, 2.0]
col3 = ["a", "b"]
t1 = table.Table([col1, col2, col3], names=["a", "b", "c"])
t2 = table.Table([col1, col2], names=["a", "b"])
t3 = table.table_helpers.simple_table()
tm = t1.copy()
tm["time"] = Time([1, 2], format="cxcsec")
tm1 = tm.copy()
tm1["time"][0] = np.ma.masked
tq = table.table_helpers.simple_table()
tq["quantity"] = [1.0, 2.0, 3.0] * u.m
tsk = table.table_helpers.simple_table()
tsk["sk"] = SkyCoord(1, 2, unit="deg")
eqsk = tsk.values_equal(tsk)
for col in eqsk.itercols():
assert np.all(col)
with pytest.raises(
ValueError, match="cannot compare tables with different column names"
):
t2.values_equal(t1)
with pytest.raises(ValueError, match="unable to compare column a"):
# Shape mismatch
t3.values_equal(t1)
with pytest.raises(ValueError, match="unable to compare column c"):
# Type mismatch in column c causes FutureWarning
t1.values_equal(2)
with pytest.raises(ValueError, match="unable to compare column c"):
t1.values_equal([1, 2])
eq = t2.values_equal(t2)
for col in eq.colnames:
assert np.all(eq[col] == [True, True])
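# A masked entry in tm1["time"] should still compare as equal to the corresponding value in tm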
eq1 = tm1.values_equal(tm)
for col in eq1.colnames:
assert np.all(eq1[col] == [True, True])
eq2 = tq.values_equal(tq)
for col in eq2.colnames:
assert np.all(eq2[col] == [True, True, True])
eq3 = t2.values_equal(2)
for col in eq3.colnames:
assert np.all(eq3[col] == [False, True])
eq4 = t2.values_equal([1, 2])
for col in eq4.colnames:
assert np.all(eq4[col] == [True, True])
# Compare table to its first row
t = table.Table(rows=[(1, "a"), (1, "b")])
eq = t.values_equal(t[0])
assert np.all(eq["col0"] == [True, True])
assert np.all(eq["col1"] == [True, False])
def test_rows_equal():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# In the cases above, Row.__eq__ gets called, but now we need to make sure
# that Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all(
(t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
assert np.all(
(t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
def test_equality_masked():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# In the cases above, Row.__eq__ gets called, but now we need to make sure
# that Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask["a"][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all(
(t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once it works, it can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
t = table.Table(t, masked=True)
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
assert np.all(
(t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
class TestMetaTable(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_content():
# If we don't have unicode literals then return
if isinstance("", bytes):
return
# Define unicode literals
string_a = "астрономическая питона"
string_b = "миллиарды световых лет"
a = table.Table([[string_a, 2], [string_b, 3]], names=("a", "b"))
assert string_a in str(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode("utf-8") in bytes(a)
def test_unicode_policy():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
assert_follows_unicode_guidelines(t)
@pytest.mark.parametrize("uni", ["питона", "ascii"])
def test_unicode_bytestring_conversion(table_types, uni):
"""
Test converting columns to all unicode or all bytestring. This
makes two columns, one which is unicode (str in Py3) and one which
is bytes (UTF-8 encoded). There are two code paths in the conversions,
a faster one where the data are actually ASCII and a slower one where
UTF-8 conversion is required. This tests both via the ``uni`` param.
"""
byt = uni.encode("utf-8")
t = table_types.Table([[byt], [uni], [1]], dtype=("S", "U", "i"))
assert t["col0"].dtype.kind == "S"
assert t["col1"].dtype.kind == "U"
assert t["col2"].dtype.kind == "i"
t["col0"].description = "col0"
t["col1"].description = "col1"
t["col0"].meta["val"] = "val0"
t["col1"].meta["val"] = "val1"
# Unicode to bytestring
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1["col0"].dtype.kind == "S"
assert t1["col1"].dtype.kind == "S"
assert t1["col2"].dtype.kind == "i"
# Meta made it through
assert t1["col0"].description == "col0"
assert t1["col1"].description == "col1"
assert t1["col0"].meta["val"] == "val0"
assert t1["col1"].meta["val"] == "val1"
# Need to de-fang the automatic unicode sandwiching of Table
assert np.array(t1["col0"])[0] == byt
assert np.array(t1["col1"])[0] == byt
assert np.array(t1["col2"])[0] == 1
# Bytestring to unicode
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1["col0"].dtype.kind == "U"
assert t1["col1"].dtype.kind == "U"
assert t1["col2"].dtype.kind == "i"
# Meta made it through
assert t1["col0"].description == "col0"
assert t1["col1"].description == "col1"
assert t1["col0"].meta["val"] == "val0"
assert t1["col1"].meta["val"] == "val1"
# No need to de-fang the automatic unicode sandwiching of Table here, but
# do just for consistency to prove things are working.
assert np.array(t1["col0"])[0] == uni
assert np.array(t1["col1"])[0] == uni
assert np.array(t1["col2"])[0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({"a": [1, 2, 3]})
the_id = id(t)
assert t["a"].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=["a"])
out = []
for r1 in t:
for r2 in t:
out.append((r1["a"], r2["a"]))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.skipif(not HAS_PANDAS, reason="requires pandas")
class TestPandas:
def test_simple(self):
t = table.Table()
for endian in ["<", ">", "="]:
for kind in ["f", "i"]:
for byte in ["2", "4", "8"]:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x.newbyteorder(endian)
t["u"] = ["a", "b", "c"]
t["s"] = ["a", "b", "c"]
d = t.to_pandas()
for column in t.columns:
if column == "u":
assert np.all(t["u"] == np.array(["a", "b", "c"]))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == "s":
assert np.all(t["s"] == np.array(["a", "b", "c"]))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.isnative:
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
# Explicitly testing little/big/native endian separately -
# regression for a case in astropy/astropy#11286 not caught by #3729.
d[["<i4", ">i4"]]
d[["<f4", ">f4"]]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ("u", "s"):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.isnative:
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
@pytest.mark.parametrize("unsigned", ["u", ""])
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_nullable_int(self, unsigned, bits):
np_dtype = f"{unsigned}int{bits}"
c = MaskedColumn([1, 2], mask=[False, True], dtype=np_dtype)
t = Table([c])
df = t.to_pandas()
pd_dtype = np_dtype.replace("i", "I").replace("u", "U")
assert str(df["col0"].dtype) == pd_dtype
t2 = Table.from_pandas(df)
assert str(t2["col0"].dtype) == np_dtype
assert np.all(t2["col0"].mask == [False, True])
assert np.all(t2["col0"] == c)
def test_2d(self):
t = table.Table()
t["a"] = [1, 2, 3]
t["b"] = np.ones((3, 2))
with pytest.raises(
ValueError, match="Cannot convert a table with multidimensional columns"
):
t.to_pandas()
def test_mixin_pandas(self):
t = table.QTable()
for name in sorted(MIXIN_COLS):
if not name.startswith("ndarray"):
t[name] = MIXIN_COLS[name]
t["dt"] = TimeDelta([0, 2, 4, 6], format="sec")
tp = t.to_pandas()
t2 = table.Table.from_pandas(tp)
assert np.allclose(t2["quantity"], [0, 1, 2, 3])
assert np.allclose(t2["longitude"], [0.0, 1.0, 5.0, 6.0])
assert np.allclose(t2["latitude"], [5.0, 6.0, 10.0, 11.0])
assert np.allclose(t2["skycoord.ra"], [0, 1, 2, 3])
assert np.allclose(t2["skycoord.dec"], [0, 1, 2, 3])
assert np.allclose(t2["arraywrap"], [0, 1, 2, 3])
assert np.allclose(t2["arrayswap"], [0, 1, 2, 3])
assert np.allclose(
t2["earthlocation.y"], [0, 110708, 547501, 654527], rtol=0, atol=1
)
# For pandas, Time, TimeDelta are the mixins that round-trip the class
assert isinstance(t2["time"], Time)
assert np.allclose(t2["time"].jyear, [2000, 2001, 2002, 2003])
assert np.all(
t2["time"].isot
== [
"2000-01-01T12:00:00.000",
"2000-12-31T18:00:00.000",
"2002-01-01T00:00:00.000",
"2003-01-01T06:00:00.000",
]
)
assert t2["time"].format == "isot"
# TimeDelta
assert isinstance(t2["dt"], TimeDelta)
assert np.allclose(t2["dt"].value, [0, 2, 4, 6])
assert t2["dt"].format == "sec"
@pytest.mark.parametrize("use_IndexedTable", [False, True])
def test_to_pandas_index(self, use_IndexedTable):
"""Test to_pandas() with different indexing options.
This also tests the fix for #12014. The exception seen there is
reproduced here without the fix.
"""
import pandas as pd
class IndexedTable(table.QTable):
"""Always index the first column"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_index(self.colnames[0])
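# Expected pandas indexes: the default RangeIndex and a DatetimeIndex built from the "tm" column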
row_index = pd.RangeIndex(0, 2, 1)
tm_index = pd.DatetimeIndex(
["1998-01-01", "2002-01-01"], dtype="datetime64[ns]", name="tm", freq=None
)
tm = Time([1998, 2002], format="jyear")
x = [1, 2]
table_cls = IndexedTable if use_IndexedTable else table.QTable
t = table_cls([tm, x], names=["tm", "x"])
tp = t.to_pandas()
if not use_IndexedTable:
assert np.all(tp.index == row_index)
tp = t.to_pandas(index="tm")
assert np.all(tp.index == tm_index)
t.add_index("tm")
tp = t.to_pandas()
assert np.all(tp.index == tm_index)
# Make sure writing to pandas didn't hack the original table
assert t["tm"].info.indices
tp = t.to_pandas(index=True)
assert np.all(tp.index == tm_index)
tp = t.to_pandas(index=False)
assert np.all(tp.index == row_index)
with pytest.raises(ValueError) as err:
t.to_pandas(index="not a column")
assert "index must be None, False" in str(err.value)
def test_mixin_pandas_masked(self):
tm = Time([1, 2, 3], format="cxcsec")
dt = TimeDelta([1, 2, 3], format="sec")
tm[1] = np.ma.masked
dt[1] = np.ma.masked
t = table.QTable([tm, dt], names=["tm", "dt"])
tp = t.to_pandas()
assert np.all(tp["tm"].isnull() == [False, True, False])
assert np.all(tp["dt"].isnull() == [False, True, False])
t2 = table.Table.from_pandas(tp)
assert np.all(t2["tm"].mask == tm.mask)
assert np.ma.allclose(t2["tm"].jd, tm.jd, rtol=1e-14, atol=1e-14)
assert np.all(t2["dt"].mask == dt.mask)
assert np.ma.allclose(t2["dt"].jd, dt.jd, rtol=1e-14, atol=1e-14)
def test_from_pandas_index(self):
tm = Time([1998, 2002], format="jyear")
x = [1, 2]
t = table.Table([tm, x], names=["tm", "x"])
tp = t.to_pandas(index="tm")
t2 = table.Table.from_pandas(tp)
assert t2.colnames == ["x"]
t2 = table.Table.from_pandas(tp, index=True)
assert t2.colnames == ["tm", "x"]
assert np.allclose(t2["tm"].jyear, tm.jyear)
@pytest.mark.parametrize("use_nullable_int", [True, False])
def test_masking(self, use_nullable_int):
t = table.Table(masked=True)
t["a"] = [1, 2, 3]
t["a"].mask = [True, False, True]
t["b"] = [1.0, 2.0, 3.0]
t["b"].mask = [False, False, True]
t["u"] = ["a", "b", "c"]
t["u"].mask = [False, True, False]
t["s"] = ["a", "b", "c"]
t["s"].mask = [False, True, False]
# https://github.com/astropy/astropy/issues/7741
t["Source"] = [2584290278794471936, 2584290038276303744, 2584288728310999296]
t["Source"].mask = [False, False, False]
if use_nullable_int: # Default
# No warning with the default use_nullable_int=True
d = t.to_pandas(use_nullable_int=use_nullable_int)
else:
with pytest.warns(
TableReplaceWarning,
match=r"converted column 'a' from int(32|64) to float64",
):
d = t.to_pandas(use_nullable_int=use_nullable_int)
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
if hasattr(t2[name], "mask"):
assert np.all(column.mask == t2[name].mask)
if column.dtype.kind == "i":
if np.any(column.mask) and not use_nullable_int:
assert t2[name].dtype.kind == "f"
else:
assert t2[name].dtype.kind == "i"
# This warning pops up when use_nullable_int is False
# for pandas 1.5.2.
with np.errstate(invalid="ignore"):
assert_array_equal(column.data, t2[name].data.astype(column.dtype))
else:
if column.dtype.byteorder in ("=", "|"):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
def test_units(self):
import pandas as pd
import astropy.units as u
df = pd.DataFrame({"x": [1, 2, 3], "t": [1.3, 1.2, 1.8]})
t = table.Table.from_pandas(df, units={"x": u.m, "t": u.s})
assert t["x"].unit == u.m
assert t["t"].unit == u.s
# test error if not a mapping
with pytest.raises(TypeError):
table.Table.from_pandas(df, units=[u.m, u.s])
# test warning is raised if additional columns in units dict
with pytest.warns(UserWarning) as record:
table.Table.from_pandas(df, units={"x": u.m, "t": u.s, "y": u.m})
assert len(record) == 1
assert "{'y'}" in record[0].message.args[0]
def test_to_pandas_masked_int_data_with__index(self):
data = {"data": [0, 1, 2], "index": [10, 11, 12]}
t = table.Table(data=data, masked=True)
t.add_index("index")
t["data"].mask = [1, 1, 0]
df = t.to_pandas()
assert df["data"].iloc[-1] == 2
@pytest.mark.usefixtures("table_types")
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(
ValueError,
match=r"Cannot replace column 'a'. Use " "Table.replace_column.. instead.",
):
t.columns["a"] = [1, 2, 3]
with pytest.raises(
ValueError, match=r"column name not there is not in the table"
):
t.replace_column("not there", [1, 2, 3])
with pytest.raises(
ValueError, match=r"length of new column must match table length"
):
t.replace_column("a", [1, 2])
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t["a"]
tb = t["b"]
vals = [1.2, 3.4, 5.6]
for col in (
vals,
table_types.Column(vals),
table_types.Column(vals, name="a"),
table_types.Column(vals, name="b"),
):
t.replace_column("a", col)
assert np.all(t["a"] == vals)
assert t["a"] is not ta # New a column
assert t["b"] is tb # Original b column unchanged
assert t.colnames == ["a", "b"]
assert t["a"].meta == {}
assert t["a"].format is None
# Special case: replacing the only column can resize table
del t["b"]
assert len(t) == 3
t["a"] = [1, 2]
assert len(t) == 2
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index("a")
with pytest.raises(ValueError) as err:
t.replace_column("a", [1, 2, 3])
assert err.value.args[0] == "cannot replace a table index column"
def test_replace_column_no_copy(self):
t = Table([[1, 2], [3, 4]], names=["a", "b"])
a = np.array([1.5, 2.5])
t.replace_column("a", a, copy=False)
assert t["a"][0] == a[0]
t["a"][0] = 10
assert t["a"][0] == a[0]
class TestQTableColumnConversionCornerCases:
def test_replace_with_masked_col_with_units_in_qtable(self):
"""This is a small regression from #8902"""
t = QTable([[1, 2], [3, 4]], names=["a", "b"])
t["a"] = MaskedColumn([5, 6], unit="m")
assert isinstance(t["a"], u.Quantity)
def test_do_not_replace_string_column_with_units_in_qtable(self):
t = QTable([[1 * u.m]])
with pytest.warns(AstropyUserWarning, match="convert it to Quantity failed"):
t["a"] = Column(["a"], unit=u.m)
assert isinstance(t["a"], Column)
class Test__Astropy_Table__:
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable:
def __init__(self):
self.columns = [[1, 2, 3], [4, 5, 6], [7, 8, 9] * u.m]
self.names = ["a", "b", "c"]
self.meta = OrderedDict([("a", 1), ("b", 2)])
def __astropy_table__(self, cls, copy, **kwargs):
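# Return an instance of the requested class ``cls`` built from Column, MaskedColumn and
# Quantity columns, honoring ``copy`` and passing any extra kwargs through as meta.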
a, b, c = self.columns
c.info.name = "c"
cols = [table.Column(a, name="a"), table.MaskedColumn(b, name="b"), c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.Column
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta="extra!")
assert t.colnames == ["a", "b", "c"]
assert t.meta == {"extra_meta": "extra!"}
assert np.all(t["a"] == st.columns[0])
assert np.all(t["b"] == st.columns[1])
vals = t["c"].value if table_cls is table.QTable else t["c"]
assert np.all(st.columns[2].value == vals)
assert isinstance(t["a"], table.Column)
assert isinstance(t["b"], table.MaskedColumn)
assert isinstance(t["c"], col_c_class)
assert t["c"].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t["a"][0] = 10
assert st.columns[0][0] == 1 if cpy else 10
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ["a", "b", "c"]
meta = OrderedDict([("c", 3)])
t = table.Table(st, dtype=dtypes, names=names, meta=meta)
assert t.colnames == names
assert all(
col.dtype.type is dtype for col, dtype in zip(t.columns.values(), dtypes)
)
# The supplied meta overrides the existing meta. Changed in astropy 3.2.
assert t.meta != st.meta
assert t.meta == meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta="extra!")
assert "__init__() got unexpected keyword argument" in str(err.value)
class TestUpdate:
def _setup(self):
self.a = Column((1, 2, 3), name="a")
self.b = Column((4, 5, 6), name="b")
self.c = Column((7, 8, 9), name="c")
self.d = Column((10, 11, 12), name="d")
def test_different_lengths(self):
self._setup()
t1 = Table([self.a])
t2 = Table([self.b[:-1]])
msg = "Inconsistent data column lengths"
with pytest.raises(ValueError, match=msg):
t1.update(t2)
# If update didn't succeed then t1 and t2 should not have changed.
assert t1.colnames == ["a"]
assert np.all(t1["a"] == self.a)
assert t2.colnames == ["b"]
assert np.all(t2["b"] == self.b[:-1])
def test_invalid_inputs(self):
# If input is invalid then nothing should be modified.
self._setup()
t = Table([self.a])
d = {"b": self.b, "c": [0]}
msg = "Inconsistent data column lengths: {1, 3}"
with pytest.raises(ValueError, match=msg):
t.update(d)
assert t.colnames == ["a"]
assert np.all(t["a"] == self.a)
assert d == {"b": self.b, "c": [0]}
def test_metadata_conflict(self):
self._setup()
t1 = Table([self.a], meta={"a": 0, "b": [0], "c": True})
t2 = Table([self.b], meta={"a": 1, "b": [1]})
t2meta = copy.deepcopy(t2.meta)
t1.update(t2)
assert t1.meta == {"a": 1, "b": [0, 1], "c": True}
# t2 metadata should not have changed.
assert t2.meta == t2meta
def test_update(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
t2["b"] += 1
t1.update(t2)
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b + 1)
assert np.all(t1["c"] == self.c)
# t2 should not have changed.
assert t2.colnames == ["b", "c"]
assert np.all(t2["b"] == self.b + 1)
assert np.all(t2["c"] == self.c)
d = {"b": list(self.b), "d": list(self.d)}
dc = copy.deepcopy(d)
t2.update(d)
assert t2.colnames == ["b", "c", "d"]
assert np.all(t2["b"] == self.b)
assert np.all(t2["c"] == self.c)
assert np.all(t2["d"] == self.d)
# d should not have changed.
assert d == dc
# Columns were copied, so changing t2 shouldn't have affected t1.
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b + 1)
assert np.all(t1["c"] == self.c)
def test_update_without_copy(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
t1.update(t2, copy=False)
t2["b"] -= 1
assert t1.colnames == ["a", "b", "c"]
assert np.all(t1["a"] == self.a)
assert np.all(t1["b"] == self.b - 1)
assert np.all(t1["c"] == self.c)
d = {"b": np.array(self.b), "d": np.array(self.d)}
t2.update(d, copy=False)
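# With copy=False the ndarray in d is shared with t2, so this in-place change is visible in t2["b"]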
d["b"] *= 2
assert t2.colnames == ["b", "c", "d"]
assert np.all(t2["b"] == 2 * self.b)
assert np.all(t2["c"] == self.c)
assert np.all(t2["d"] == self.d)
def test_table_meta_copy():
"""
Test no copy vs light (key) copy vs deep copy of table meta for different
situations. #8404.
"""
t = table.Table([[1]])
meta = {1: [1, 2]}
# Assigning meta directly implies using direct object reference
t.meta = meta
assert t.meta is meta
# Table slice implies key copy, so values are unchanged
t2 = t[:]
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the list same object
# Table init with copy=False implies key copy
t2 = table.Table(t, copy=False)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the same list object
# Table init with copy=True implies deep copy
t2 = table.Table(t, copy=True)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is not t.meta[1] # Value is NOT the same list object
def test_table_meta_copy_with_meta_arg():
"""
Test no copy vs light (key) copy vs deep copy of table meta when meta is
supplied as a table init argument. #8404.
"""
meta = {1: [1, 2]}
meta2 = {2: [3, 4]}
t = table.Table([[1]], meta=meta, copy=False)
assert t.meta is meta
t = table.Table([[1]], meta=meta) # default copy=True
assert t.meta is not meta
assert t.meta == meta
# Test initializing from existing table with meta with copy=False
t2 = table.Table(t, meta=meta2, copy=False)
assert t2.meta is meta2
assert t2.meta != t.meta # Change behavior in #8404
# Test initializing from existing table with meta with default copy=True
t2 = table.Table(t, meta=meta2)
assert t2.meta is not meta2
assert t2.meta != t.meta # Change behavior in #8404
# Table init with copy=True and empty dict meta gets that empty dict
t2 = table.Table(t, copy=True, meta={})
assert t2.meta == {}
# Table init with copy=True and kwarg meta=None gets the original table dict.
# This is a somewhat ambiguous case because it could be interpreted as the
# user wanting NO meta set on the output. This could be implemented by inspecting
# call args.
t2 = table.Table(t, copy=True, meta=None)
assert t2.meta == t.meta
# Test initializing empty table with meta with copy=False
t = table.Table(meta=meta, copy=False)
assert t.meta is meta
assert t.meta[1] is meta[1]
# Test initializing empty table with meta with default copy=True (deepcopy meta)
t = table.Table(meta=meta)
assert t.meta is not meta
assert t.meta == meta
assert t.meta[1] is not meta[1]
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=["a", "b"])
ta = t["a"]
tb = t["b"]
ta.info.meta = {"aa": [0, 1, 2, 3, 4]}
ta.info.format = "%f"
t.replace_column("a", a.to("cm"))
assert np.all(t["a"] == ta)
assert t["a"] is not ta # New a column
assert t["b"] is tb # Original b column unchanged
assert t.colnames == ["a", "b"]
assert t["a"].info.meta is None
assert t["a"].info.format is None
def test_replace_update_column_via_setitem():
"""
Test table update like ``t['a'] = value``. This leverages off the
already well-tested ``replace_column`` and in-place update
``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=["a", "b"])
assert isinstance(t["a"], u.Quantity)
# Inplace update
ta = t["a"]
t["a"] = 5 * u.m
assert np.all(t["a"] == [5, 5] * u.m)
assert t["a"] is ta
# Replace
t["a"] = [5, 6]
assert np.all(t["a"] == [5, 6])
assert isinstance(t["a"], table.Column)
assert t["a"] is not ta
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
with table.conf.set_temp("replace_warnings", ["refcount", "attributes", "slice"]):
t["a"] = 0 # in-place update
t["a"] = [10, 20, 30] # replace column
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
with table.conf.set_temp("replace_warnings", ["refcount", "attributes", "slice"]):
t2 = t[:2]
t2["a"] = 0 # in-place slice update
assert np.all(t["a"] == [0, 0, 3])
with pytest.warns(
TableReplaceWarning,
match="replaced column 'a' which looks like an array slice",
) as w:
t2["a"] = [10, 20] # replace slice
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
t["a"].unit = "m"
with pytest.warns(
TableReplaceWarning,
match=r"replaced column 'a' " r"and column attributes \['unit'\]",
) as w:
with table.conf.set_temp(
"replace_warnings", ["refcount", "attributes", "slice"]
):
t["a"] = [10, 20, 30]
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
ta = t["a"] # noqa: F841 : Generate an extra reference to original column
with pytest.warns(
TableReplaceWarning, match="replaced column 'a' and the number of references"
) as w:
with table.conf.set_temp(
"replace_warnings", ["refcount", "attributes", "slice"]
):
t["a"] = [10, 20, 30]
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
from inspect import currentframe, getframeinfo
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
with table.conf.set_temp("replace_warnings", ["always"]):
t["a"] = 0 # in-place slice update
with pytest.warns(TableReplaceWarning, match="replaced column 'a'") as w:
frameinfo = getframeinfo(currentframe())
t["a"] = [10, 20, 30] # replace column
assert len(w) == 1
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert "test_table" in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=["a", "b"])
ta = t["a"]
t["a"].unit = "m"
with table.conf.set_temp("replace_inplace", True):
with table.conf.set_temp(
"replace_warnings", ["always", "refcount", "attributes", "slice"]
):
t["a"] = 0 # in-place update
assert ta is t["a"]
t["a"] = [10, 20, 30] # normally replaces column, but not now
assert ta is t["a"]
assert np.all(t["a"] == [10, 20, 30])
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=("a", "b"))
t.add_index("a")
original_key = t.primary_key
# first check the content of the primary key
assert original_key[0] == "a"
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
# each derived table should carry the same primary_key
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
"""Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails."""
t1 = table.QTable([["A"]], names="B")
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
def test_create_table_from_final_row():
"""Regression test for issue #8422: passing the last row of a table into
Table should return a new table containing that row."""
t1 = table.Table([(1, 2)], names=["col"])
row = t1[-1]
t2 = table.Table(row)["col"]
assert t2[0] == 2
def test_key_values_in_as_array():
# Test column selection via the ``names`` argument of Table.as_array()
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
# Creating a table with three columns
t1 = table.Table(
rows=data_rows,
names=("a", "b", "c"),
meta={"name": "first table"},
dtype=("i4", "f8", "S1"),
)
# Values of the sliced columns a, b are stored in a structured numpy array
a = np.array([(1, 2.0), (4, 5.0), (5, 8.2)], dtype=[("a", "<i4"), ("b", "<f8")])
# Values for the sliced column c are stored in a structured numpy array
b = np.array([(b"x",), (b"y",), (b"z",)], dtype=[("c", "S1")])
# Comparing initialised array with sliced array using Table.as_array()
assert np.array_equal(a, t1.as_array(names=["a", "b"]))
assert np.array_equal(b, t1.as_array(names=["c"]))
def test_tolist():
t = table.Table(
[[1, 2, 3], [1.1, 2.2, 3.3], [b"foo", b"bar", b"hello"]], names=("a", "b", "c")
)
assert t["a"].tolist() == [1, 2, 3]
assert_array_equal(t["b"].tolist(), [1.1, 2.2, 3.3])
assert t["c"].tolist() == ["foo", "bar", "hello"]
assert isinstance(t["a"].tolist()[0], int)
assert isinstance(t["b"].tolist()[0], float)
assert isinstance(t["c"].tolist()[0], str)
t = table.Table(
[[[1, 2], [3, 4]], [[b"foo", b"bar"], [b"hello", b"world"]]], names=("a", "c")
)
assert t["a"].tolist() == [[1, 2], [3, 4]]
assert t["c"].tolist() == [["foo", "bar"], ["hello", "world"]]
assert isinstance(t["a"].tolist()[0][0], int)
assert isinstance(t["c"].tolist()[0][0], str)
class MyTable(Table):
foo = TableAttribute()
bar = TableAttribute(default=[])
baz = TableAttribute(default=1)
def test_table_attribute():
assert repr(MyTable.baz) == "<TableAttribute name=baz default=1>"
t = MyTable([[1, 2]])
# __attributes__ created on the fly on the first access of an attribute
# that has a non-None default.
assert "__attributes__" not in t.meta
assert t.foo is None
assert "__attributes__" not in t.meta
assert t.baz == 1
assert "__attributes__" in t.meta
t.bar.append(2.0)
assert t.bar == [2.0]
assert t.baz == 1
t.baz = "baz"
assert t.baz == "baz"
# Table attributes round-trip through pickle
tp = pickle.loads(pickle.dumps(t))
assert tp.foo is None
assert tp.baz == "baz"
assert tp.bar == [2.0]
# Allow initialization of attributes in table creation, with / without data
for data in None, [[1, 2]]:
t2 = MyTable(data, foo=3, bar="bar", baz="baz")
assert t2.foo == 3
assert t2.bar == "bar"
assert t2.baz == "baz"
# Initializing from an existing MyTable works, with and without kwarg attrs
t3 = MyTable(t2)
assert t3.foo == 3
assert t3.bar == "bar"
assert t3.baz == "baz"
t3 = MyTable(t2, foo=5, bar="fubar")
assert t3.foo == 5
assert t3.bar == "fubar"
assert t3.baz == "baz"
# Deleting an attribute removes it from meta["__attributes__"]
del t.baz
assert "baz" not in t.meta["__attributes__"]
del t.bar
assert "__attributes__" not in t.meta
def test_table_attribute_ecsv():
# Table attribute round-trip through ECSV
t = MyTable([[1, 2]], bar=[2.0], baz="baz")
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = MyTable.read(out.getvalue(), format="ascii.ecsv")
assert t2.foo is None
assert t2.bar == [2.0]
assert t2.baz == "baz"
def test_table_attribute_fail():
# Code raises ValueError(f'{attr} not allowed as TableAttribute') but in this
# context it gets re-raised as a RuntimeError during class definition.
with pytest.raises(RuntimeError, match="Error calling __set_name__"):
class MyTable2(Table):
descriptions = TableAttribute() # Conflicts with init arg
with pytest.raises(RuntimeError, match="Error calling __set_name__"):
class MyTable3(Table):
colnames = TableAttribute() # Conflicts with built-in property
def test_set_units_fail():
dat = [[1.0, 2.0], ["aa", "bb"]]
with pytest.raises(
ValueError, match="sequence of unit values must match number of columns"
):
Table(dat, units=[u.m])
with pytest.raises(
ValueError, match="invalid column name c for setting unit attribute"
):
Table(dat, units={"c": u.m})
def test_set_units():
dat = [[1.0, 2.0], ["aa", "bb"], [3, 4]]
exp_units = (u.m, None, None)
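# Units can be given as a dict (where "" for "c" is expected to mean no unit) or as a plain sequence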
for cls in Table, QTable:
for units in ({"a": u.m, "c": ""}, exp_units):
qt = cls(dat, units=units, names=["a", "b", "c"])
if cls is QTable:
assert isinstance(qt["a"], u.Quantity)
assert isinstance(qt["b"], table.Column)
assert isinstance(qt["c"], table.Column)
for col, unit in zip(qt.itercols(), exp_units):
assert col.info.unit is unit
def test_set_descriptions():
dat = [[1.0, 2.0], ["aa", "bb"]]
exp_descriptions = ("my description", None)
for cls in Table, QTable:
for descriptions in ({"a": "my description"}, exp_descriptions):
qt = cls(dat, descriptions=descriptions, names=["a", "b"])
for col, description in zip(qt.itercols(), exp_descriptions):
assert col.info.description == description
def test_set_units_from_row():
text = ["a,b", ",s", "1,2", "3,4"]
units = Table.read(text, format="ascii", data_start=1, data_end=2)[0]
t = Table.read(text, format="ascii", data_start=2, units=units)
assert isinstance(units, table.Row)
assert t["a"].info.unit is None
assert t["b"].info.unit is u.s
def test_set_units_descriptions_read():
"""Test setting units and descriptions via Table.read. The test here
is less comprehensive because the implementation is exactly the same
as for Table.__init__ (calling Table._set_column_attribute)"""
for cls in Table, QTable:
t = cls.read(
["a b", "1 2"],
format="ascii",
units=[u.m, u.s],
descriptions=["hi", "there"],
)
assert t["a"].info.unit is u.m
assert t["b"].info.unit is u.s
assert t["a"].info.description == "hi"
assert t["b"].info.description == "there"
def test_broadcasting_8933():
"""Explicitly check re-work of code related to broadcasting in #8933"""
t = table.Table([[1, 2]]) # Length=2 table
t["a"] = [[3, 4]] # Can broadcast if ndim > 1 and shape[0] == 1
t["b"] = 5
t["c"] = [1] # Treat as broadcastable scalar, not length=1 array (which would fail)
assert np.all(t["a"] == [[3, 4], [3, 4]])
assert np.all(t["b"] == [5, 5])
assert np.all(t["c"] == [1, 1])
# Test that broadcasted column is writeable
t["c"][1] = 10
assert np.all(t["c"] == [1, 10])
def test_custom_masked_column_in_nonmasked_table():
"""Test the refactor and change in column upgrades introduced
in 95902650f. This fixes a regression introduced by #8789
(Change behavior of Table regarding masked columns)."""
class MyMaskedColumn(table.MaskedColumn):
pass
class MySubMaskedColumn(MyMaskedColumn):
pass
class MyColumn(table.Column):
pass
class MySubColumn(MyColumn):
pass
class MyTable(table.Table):
Column = MyColumn
MaskedColumn = MyMaskedColumn
a = table.Column([1])
b = table.MaskedColumn([2], mask=[True])
c = MyMaskedColumn([3], mask=[True])
d = MySubColumn([4])
e = MySubMaskedColumn([5], mask=[True])
# Two different pathways for making table
t1 = MyTable([a, b, c, d, e], names=["a", "b", "c", "d", "e"])
t2 = MyTable()
t2["a"] = a
t2["b"] = b
t2["c"] = c
t2["d"] = d
t2["e"] = e
for t in (t1, t2):
assert type(t["a"]) is MyColumn
assert type(t["b"]) is MyMaskedColumn # upgrade
assert type(t["c"]) is MyMaskedColumn
assert type(t["d"]) is MySubColumn
assert type(t["e"]) is MySubMaskedColumn # sub-class not downgraded
def test_sort_with_mutable_skycoord():
"""Test sorting a table that has a mutable column such as SkyCoord.
In this case the sort is done in-place
"""
t = Table([[2, 1], SkyCoord([4, 3], [6, 5], unit="deg,deg")], names=["a", "sc"])
meta = {"a": [1, 2]}
ta = t["a"]
tsc = t["sc"]
t["sc"].info.meta = meta
t.sort("a")
assert np.all(t["a"] == [1, 2])
assert np.allclose(t["sc"].ra.to_value(u.deg), [3, 4])
assert np.allclose(t["sc"].dec.to_value(u.deg), [5, 6])
assert t["a"] is ta
assert t["sc"] is tsc
# Prior to astropy 4.1 this was a deep copy of SkyCoord column; after 4.1
# it is a reference.
t["sc"].info.meta["a"][0] = 100
assert meta["a"][0] == 100
def test_sort_with_non_mutable():
"""Test sorting a table that has a non-mutable column."""
t = Table([[2, 1], [3, 4]], names=["a", "b"])
ta = t["a"]
tb = t["b"]
t["b"].setflags(write=False)
meta = {"a": [1, 2]}
t["b"].info.meta = meta
t.sort("a")
assert np.all(t["a"] == [1, 2])
assert np.all(t["b"] == [4, 3])
assert ta is t["a"]
assert tb is not t["b"]
# The non-writeable column is replaced by a copy during the sort, so its
# meta is not shared with the original dict (unlike the mutable case above).
t["b"].info.meta["a"][0] = 100
assert meta["a"][0] == 1
def test_init_with_list_of_masked_arrays():
"""Test the fix for #8977"""
m0 = np.ma.array([0, 1, 2], mask=[True, False, True])
m1 = np.ma.array([3, 4, 5], mask=[False, True, False])
mc = [m0, m1]
# Test _init_from_list
t = table.Table([mc], names=["a"])
# Test add_column
t["b"] = [m1, m0]
assert t["a"].shape == (2, 3)
assert np.all(t["a"][0] == m0)
assert np.all(t["a"][1] == m1)
assert np.all(t["a"][0].mask == m0.mask)
assert np.all(t["a"][1].mask == m1.mask)
assert t["b"].shape == (2, 3)
assert np.all(t["b"][0] == m1)
assert np.all(t["b"][1] == m0)
assert np.all(t["b"][0].mask == m1.mask)
assert np.all(t["b"][1].mask == m0.mask)
def test_data_to_col_convert_strategy():
"""Test the update to how data_to_col works (#8972), using the regression
example from #8971.
"""
t = table.Table([[0, 1]])
t["a"] = 1
t["b"] = np.int64(2) # Failed previously
assert np.all(t["a"] == [1, 1])
assert np.all(t["b"] == [2, 2])
def test_structured_masked_column():
"""Test that adding a masked ndarray with a structured dtype works"""
dtype = np.dtype([("z", "f8"), ("x", "f8"), ("y", "i4")])
t = Table()
t["a"] = np.ma.array(
[
(1, 2, 3),
(4, 5, 6),
],
mask=[
(False, False, True),
(False, True, False),
],
dtype=dtype,
)
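    # The mask of a structured column is accessible per field.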
assert np.all(t["a"]["z"].mask == [False, False])
assert np.all(t["a"]["x"].mask == [False, True])
assert np.all(t["a"]["y"].mask == [True, False])
assert isinstance(t["a"], MaskedColumn)
def test_rows_with_mixins():
"""Test for #9165 to allow adding a list of mixin objects.
Also test for fix to #9357 where group_by() failed due to
mixin object not having info.indices set to [].
"""
tm = Time([1, 2], format="cxcsec")
q = [1, 2] * u.m
mixed1 = [1 * u.m, 2] # Mixed input, fails to convert to Quantity
mixed2 = [2, 1 * u.m] # Mixed input, not detected as potential mixin
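    # Both mixed lists end up as plain object-dtype Columns rather than mixin
    # columns; this is checked explicitly further down.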
rows = [
(1, q[0], tm[0]),
(2, q[1], tm[1]),
]
t = table.QTable(rows=rows)
t["a"] = [q[0], q[1]]
t["b"] = [tm[0], tm[1]]
t["m1"] = mixed1
t["m2"] = mixed2
assert np.all(t["col1"] == q)
assert np.all(t["col2"] == tm)
assert np.all(t["a"] == q)
assert np.all(t["b"] == tm)
assert np.all(t["m1"][ii] == mixed1[ii] for ii in range(2))
assert np.all(t["m2"][ii] == mixed2[ii] for ii in range(2))
assert type(t["m1"]) is table.Column
assert t["m1"].dtype is np.dtype(object)
assert type(t["m2"]) is table.Column
assert t["m2"].dtype is np.dtype(object)
# Ensure group_by() runs without failing for sortable columns.
# The columns 'm1', and 'm2' are object dtype and not sortable.
for name in ["col0", "col1", "col2", "a", "b"]:
t.group_by(name)
# For good measure include exactly the failure in #9357 in which the
# list of Time() objects is in the Table initializer.
mjds = [Time(58000, format="mjd")]
t = Table([mjds, ["gbt"]], names=("mjd", "obs"))
t.group_by("obs")
def test_iterrows():
dat = [
(1, 2, 3),
(4, 5, 6),
(7, 8, 6),
]
t = table.Table(rows=dat, names=("a", "b", "c"))
c_s = []
a_s = []
for c, a in t.iterrows("c", "a"):
a_s.append(a)
c_s.append(c)
assert np.all(t["a"] == a_s)
assert np.all(t["c"] == c_s)
rows = [row for row in t.iterrows()]
assert rows == dat
with pytest.raises(ValueError, match="d is not a valid column name"):
t.iterrows("d")
def test_values_and_types():
dat = [
(1, 2, 3),
(4, 5, 6),
(7, 8, 6),
]
t = table.Table(rows=dat, names=("a", "b", "c"))
assert isinstance(t.values(), type(OrderedDict().values()))
assert isinstance(t.columns.values(), type(OrderedDict().values()))
assert isinstance(t.columns.keys(), type(OrderedDict().keys()))
for i in t.values():
assert isinstance(i, table.column.Column)
def test_items():
dat = [
(1, 2, 3),
(4, 5, 6),
(7, 8, 9),
]
t = table.Table(rows=dat, names=("a", "b", "c"))
assert isinstance(t.items(), type(OrderedDict({}).items()))
for i in list(t.items()):
assert isinstance(i, tuple)
def test_read_write_not_replaceable():
t = table.Table()
with pytest.raises(AttributeError):
t.read = "fake_read"
with pytest.raises(AttributeError):
t.write = "fake_write"
def test_keep_columns_with_generator():
# Regression test for #12529
t = table.table_helpers.simple_table(1)
t.keep_columns(col for col in t.colnames if col == "a")
assert t.colnames == ["a"]
def test_remove_columns_with_generator():
# Regression test for #12529
t = table.table_helpers.simple_table(1)
t.remove_columns(col for col in t.colnames if col == "a")
assert t.colnames == ["b", "c"]
def test_keep_columns_invalid_names_messages():
t = table.table_helpers.simple_table(1)
with pytest.raises(KeyError, match='column "d" does not exist'):
t.keep_columns(["c", "d"])
with pytest.raises(KeyError, match="columns {'[de]', '[de]'} do not exist"):
t.keep_columns(["c", "d", "e"])
def test_remove_columns_invalid_names_messages():
t = table.table_helpers.simple_table(1)
with pytest.raises(KeyError, match='column "d" does not exist'):
t.remove_columns(["c", "d"])
with pytest.raises(KeyError, match="columns {'[de]', '[de]'} do not exist"):
t.remove_columns(["c", "d", "e"])
@pytest.mark.parametrize("path_type", ["str", "Path"])
def test_read_write_tilde_path(path_type, home_is_tmpdir):
if path_type == "str":
test_file = os.path.join("~", "test.csv")
else:
test_file = pathlib.Path("~", "test.csv")
t1 = Table()
t1["a"] = [1, 2, 3]
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == [1, 2, 3])
# Ensure the data wasn't written to the literal tilde-prefixed path
assert not os.path.exists(test_file)
def test_add_list_order():
t = Table()
names = list(map(str, range(20)))
array = np.empty((20, 1))
t.add_columns(array, names=names)
assert t.colnames == names
4a0b8f5e2d95199085229e81105e9c0ce9d7e7acc107dbba0e0021cf898423b9
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
from contextlib import nullcontext
import numpy as np
import pytest
from astropy import table
from astropy import units as u
from astropy.coordinates import (
BaseRepresentationOrDifferential,
CartesianRepresentation,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
search_around_3d,
)
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.coordinates.tests.test_representation import representation_equal
from astropy.table import Column, MaskedColumn, QTable, Table, TableMergeError
from astropy.table.operations import _get_out_class, join_distance, join_skycoord
from astropy.time import Time, TimeDelta
from astropy.units.quantity import Quantity
from astropy.utils import metadata
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.metadata import MergeConflictError
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def check_mask(col, exp_mask):
"""Check that col.mask == exp_mask"""
if hasattr(col, "mask"):
# Coerce expected mask into dtype of col.mask. In particular this is
# needed for types like EarthLocation where the mask is a structured
# array.
exp_mask = np.array(exp_mask).astype(col.mask.dtype)
out = np.all(col.mask == exp_mask)
else:
# With no mask the check is OK if all the expected mask values
# are False (i.e. no auto-conversion to MaskedQuantity if it was
# not required by the join).
out = np.all(exp_mask == False)
return out
class TestJoin:
def _setup(self, t_cls=Table):
lines1 = [
" a b c ",
" 0 foo L1",
" 1 foo L2",
" 1 bar L3",
" 2 bar L4",
]
lines2 = [
" a b d ",
" 1 foo R1",
" 1 foo R2",
" 2 bar R3",
" 4 bar R4",
]
self.t1 = t_cls.read(lines1, format="ascii")
self.t2 = t_cls.read(lines2, format="ascii")
self.t3 = t_cls(self.t2, copy=True)
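        # t3 is a copy of t2 whose metadata (set below) deliberately conflicts
        # with t1's, for use in the metadata-conflict tests.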
self.t1.meta.update(OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)]))
self.t2.meta.update(OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]))
self.t3.meta.update(OrderedDict([("b", 3), ("c", [1, 2]), ("d", 2), ("a", 1)]))
self.meta_merge = OrderedDict(
[
("b", [1, 2, 3, 4]),
("c", {"a": 1, "b": 1}),
("d", 1),
("a", 1),
]
)
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.join(self.t1, self.t2, join_type="inner")
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.join(self.t1, self.t3, join_type="inner")
assert len(w) == 3
assert out.meta == self.t3.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.join(
self.t1, self.t3, join_type="inner", metadata_conflicts="warn"
)
assert len(w) == 3
assert out.meta == self.t3.meta
out = table.join(
self.t1, self.t3, join_type="inner", metadata_conflicts="silent"
)
assert out.meta == self.t3.meta
with pytest.raises(MergeConflictError):
out = table.join(
self.t1, self.t3, join_type="inner", metadata_conflicts="error"
)
with pytest.raises(ValueError):
out = table.join(
self.t1, self.t3, join_type="inner", metadata_conflicts="nonsense"
)
def test_both_unmasked_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Basic join with default parameters (inner join on common keys)
t12 = table.join(t1, t2)
assert type(t12) is operation_table_type
assert type(t12["a"]) is type(t1["a"]) # noqa: E721
assert type(t12["b"]) is type(t1["b"]) # noqa: E721
assert type(t12["c"]) is type(t1["c"]) # noqa: E721
assert type(t12["d"]) is type(t2["d"]) # noqa: E721
assert t12.masked is False
assert sort_eq(
t12.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 1 foo L2 R1",
" 1 foo L2 R2",
" 2 bar L4 R3",
],
)
# Table meta merged properly
assert t12.meta == self.meta_merge
def test_both_unmasked_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type="left")
assert t12.has_masked_columns is True
assert t12.masked is False
for name in ("a", "b", "c"):
assert type(t12[name]) is Column
assert type(t12["d"]) is MaskedColumn
assert sort_eq(
t12.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 foo L1 --",
" 1 bar L3 --",
" 1 foo L2 R1",
" 1 foo L2 R2",
" 2 bar L4 R3",
],
)
# Right join
t12 = table.join(t1, t2, join_type="right")
assert t12.has_masked_columns is True
assert t12.masked is False
assert sort_eq(
t12.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 1 foo L2 R1",
" 1 foo L2 R2",
" 2 bar L4 R3",
" 4 bar -- R4",
],
)
# Outer join
t12 = table.join(t1, t2, join_type="outer")
assert t12.has_masked_columns is True
assert t12.masked is False
assert sort_eq(
t12.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 foo L1 --",
" 1 bar L3 --",
" 1 foo L2 R1",
" 1 foo L2 R2",
" 2 bar L4 R3",
" 4 bar -- R4",
],
)
# Check that the common keys are 'a', 'b'
t12a = table.join(t1, t2, join_type="outer")
t12b = table.join(t1, t2, join_type="outer", keys=["a", "b"])
assert np.all(t12a.as_array() == t12b.as_array())
def test_both_unmasked_single_key_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Inner join on 'a' column
t12 = table.join(t1, t2, keys="a")
assert type(t12) is operation_table_type
assert type(t12["a"]) is type(t1["a"]) # noqa: E721
assert type(t12["b_1"]) is type(t1["b"]) # noqa: E721
assert type(t12["c"]) is type(t1["c"]) # noqa: E721
assert type(t12["b_2"]) is type(t2["b"]) # noqa: E721
assert type(t12["d"]) is type(t2["d"]) # noqa: E721
assert t12.masked is False
assert sort_eq(
t12.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 1 foo L2 foo R1",
" 1 foo L2 foo R2",
" 1 bar L3 foo R1",
" 1 bar L3 foo R2",
" 2 bar L4 bar R3",
],
)
def test_both_unmasked_single_key_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type="left", keys="a")
assert t12.has_masked_columns is True
assert sort_eq(
t12.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 0 foo L1 -- --",
" 1 foo L2 foo R1",
" 1 foo L2 foo R2",
" 1 bar L3 foo R1",
" 1 bar L3 foo R2",
" 2 bar L4 bar R3",
],
)
# Right join
t12 = table.join(t1, t2, join_type="right", keys="a")
assert t12.has_masked_columns is True
assert sort_eq(
t12.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 1 foo L2 foo R1",
" 1 foo L2 foo R2",
" 1 bar L3 foo R1",
" 1 bar L3 foo R2",
" 2 bar L4 bar R3",
" 4 -- -- bar R4",
],
)
# Outer join
t12 = table.join(t1, t2, join_type="outer", keys="a")
assert t12.has_masked_columns is True
assert sort_eq(
t12.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 0 foo L1 -- --",
" 1 foo L2 foo R1",
" 1 foo L2 foo R2",
" 1 bar L3 foo R1",
" 1 bar L3 foo R2",
" 2 bar L4 bar R3",
" 4 -- -- bar R4",
],
)
def test_masked_unmasked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
# Result table is never masked
t1m2 = table.join(t1m, t2, join_type="inner")
assert t1m2.masked is False
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2))
# Mask out some values in left table and make sure they propagate
t1m["b"].mask[1] = True
t1m["c"].mask[2] = True
t1m2 = table.join(t1m, t2, join_type="inner", keys="a")
assert sort_eq(
t1m2.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 1 -- L2 foo R1",
" 1 -- L2 foo R2",
" 1 bar -- foo R1",
" 1 bar -- foo R2",
" 2 bar L4 bar R3",
],
)
t21m = table.join(t2, t1m, join_type="inner", keys="a")
assert sort_eq(
t21m.pformat(),
[
" a b_1 d b_2 c ",
"--- --- --- --- ---",
" 1 foo R2 -- L2",
" 1 foo R2 bar --",
" 1 foo R1 -- L2",
" 1 foo R1 bar --",
" 2 bar R3 bar L4",
],
)
def test_masked_masked(self, operation_table_type):
        """Two masked tables"""
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
t2m = operation_table_type(self.t2, masked=True)
# Result table is never masked but original column types are preserved
t1m2m = table.join(t1m, t2m, join_type="inner")
assert t1m2m.masked is False
for col in t1m2m.itercols():
assert type(col) is MaskedColumn
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2m))
# Mask out some values in both tables and make sure they propagate
t1m["b"].mask[1] = True
t1m["c"].mask[2] = True
t2m["d"].mask[2] = True
t1m2m = table.join(t1m, t2m, join_type="inner", keys="a")
assert sort_eq(
t1m2m.pformat(),
[
" a b_1 c b_2 d ",
"--- --- --- --- ---",
" 1 -- L2 foo R1",
" 1 -- L2 foo R2",
" 1 bar -- foo R1",
" 1 bar -- foo R2",
" 2 bar L4 bar --",
],
)
def test_classes(self):
"""Ensure that classes and subclasses get through as expected"""
class MyCol(Column):
pass
class MyMaskedCol(MaskedColumn):
pass
t1 = Table()
t1["a"] = MyCol([1])
t1["b"] = MyCol([2])
t1["c"] = MyMaskedCol([3])
t2 = Table()
t2["a"] = Column([1, 2])
t2["d"] = MyCol([3, 4])
t2["e"] = MyMaskedCol([5, 6])
t12 = table.join(t1, t2, join_type="inner")
for name, exp_type in (
("a", MyCol),
("b", MyCol),
("c", MyMaskedCol),
("d", MyCol),
("e", MyMaskedCol),
):
            assert type(t12[name]) is exp_type
t21 = table.join(t2, t1, join_type="left")
# Note col 'b' gets upgraded from MyCol to MaskedColumn since it needs to be
# masked, but col 'c' stays since MyMaskedCol supports masking.
for name, exp_type in (
("a", MyCol),
("b", MaskedColumn),
("c", MyMaskedCol),
("d", MyCol),
("e", MyMaskedCol),
):
            assert type(t21[name]) is exp_type
def test_col_rename(self, operation_table_type):
        """
        Test auto col renaming when there is a conflict. Use
        non-default values of uniq_col_name and table_names.
        """
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t12 = table.join(
t1,
t2,
uniq_col_name="x_{table_name}_{col_name}_y",
table_names=["L", "R"],
keys="a",
)
assert t12.colnames == ["a", "x_L_b_y", "c", "x_R_b_y", "d"]
def test_rename_conflict(self, operation_table_type):
        """
        Test that auto-column rename fails because of a conflict
        with an existing column
        """
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t1["b_1"] = 1 # Add a new column b_1 that will conflict with auto-rename
with pytest.raises(TableMergeError):
table.join(t1, t2, keys="a")
def test_missing_keys(self, operation_table_type):
        """Merge on a key column that doesn't exist"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
with pytest.raises(TableMergeError):
table.join(t1, t2, keys=["a", "not there"])
def test_bad_join_type(self, operation_table_type):
        """Bad join_type input"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
with pytest.raises(ValueError):
table.join(t1, t2, join_type="illegal value")
def test_no_common_keys(self, operation_table_type):
        """Merge tables with no common keys"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
del t1["a"]
del t1["b"]
del t2["a"]
del t2["b"]
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_masked_key_column(self, operation_table_type):
        """Merge on a key column that has a masked element"""
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
t1 = self.t1
t2 = operation_table_type(self.t2, masked=True)
table.join(t1, t2) # OK
t2["a"].mask[0] = True
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t2.rename_column("d", "c") # force col conflict and renaming
meta1 = OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])
meta2 = OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
        # Key col 'a' has conflicting units; the merged column takes the last value ('m')
t1["a"].unit = "cm"
t2["a"].unit = "m"
# Key col 'b', take first value 't1_b'
t1["b"].info.description = "t1_b"
        # Key col 'b', take first non-empty value '%6s'
t2["b"].info.format = "%6s"
# Key col 'a', should be merged meta
t1["a"].info.meta = meta1
t2["a"].info.meta = meta2
# Key col 'b', should be meta2
t2["b"].info.meta = meta2
# All these should pass through
t1["c"].info.format = "%3s"
t1["c"].info.description = "t1_c"
t2["c"].info.format = "%6s"
t2["c"].info.description = "t2_c"
if operation_table_type is Table:
ctx = pytest.warns(
metadata.MergeConflictWarning,
match=(
r"In merged column 'a' the 'unit' attribute does not match \(cm"
r" != m\)"
),
)
else:
ctx = nullcontext()
with ctx:
t12 = table.join(t1, t2, keys=["a", "b"])
assert t12["a"].unit == "m"
assert t12["b"].info.description == "t1_b"
assert t12["b"].info.format == "%6s"
assert t12["a"].info.meta == self.meta_merge
assert t12["b"].info.meta == meta2
assert t12["c_1"].info.format == "%3s"
assert t12["c_1"].info.description == "t1_c"
assert t12["c_2"].info.format == "%6s"
assert t12["c_2"].info.description == "t2_c"
def test_join_multidimensional(self, operation_table_type):
self._setup(operation_table_type)
# Regression test for #2984, which was an issue where join did not work
# on multi-dimensional columns.
t1 = operation_table_type()
t1["a"] = [1, 2, 3]
t1["b"] = np.ones((3, 4))
t2 = operation_table_type()
t2["a"] = [1, 2, 3]
t2["c"] = [4, 5, 6]
t3 = table.join(t1, t2)
np.testing.assert_allclose(t3["a"], t1["a"])
np.testing.assert_allclose(t3["b"], t1["b"])
np.testing.assert_allclose(t3["c"], t2["c"])
def test_join_multidimensional_masked(self, operation_table_type):
        """
        Test for outer join with multidimensional columns where masking is required.
        (Issue #4059).
        """
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
a = table.MaskedColumn([1, 2, 3], name="a")
a2 = table.Column([1, 3, 4], name="a")
b = table.MaskedColumn(
[
[1, 2],
[3, 4],
[5, 6],
],
name="b",
mask=[
[1, 0],
[0, 1],
[0, 0],
],
)
c = table.Column(
[
[1, 1],
[2, 2],
[3, 3],
],
name="c",
)
t1 = operation_table_type([a, b])
t2 = operation_table_type([a2, c])
t12 = table.join(t1, t2, join_type="inner")
assert np.all(
t12["b"].mask
== [
[True, False],
[False, False],
]
)
assert not hasattr(t12["c"], "mask")
t12 = table.join(t1, t2, join_type="outer")
assert np.all(
t12["b"].mask
== [
[True, False],
[False, True],
[False, False],
[True, True],
]
)
assert np.all(
t12["c"].mask
== [
[False, False],
[True, True],
[False, False],
[False, False],
]
)
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols["m"]
cls_name = type(col).__name__
len_col = len(col)
idx = np.arange(len_col)
t1 = table.QTable([idx, col], names=["idx", "m1"])
t2 = table.QTable([idx, col], names=["idx", "m2"])
# Set up join mismatches for different join_type cases
t1 = t1[[0, 1, 3]]
t2 = t2[[0, 2, 3]]
# Test inner join, which works for all mixin_cols
out = table.join(t1, t2, join_type="inner")
assert len(out) == 2
assert out["m2"].__class__ is col.__class__
assert np.all(out["idx"] == [0, 3])
if cls_name == "SkyCoord":
# SkyCoord doesn't support __eq__ so use our own
assert skycoord_equal(out["m1"], col[[0, 3]])
assert skycoord_equal(out["m2"], col[[0, 3]])
elif "Repr" in cls_name or "Diff" in cls_name:
assert np.all(representation_equal(out["m1"], col[[0, 3]]))
assert np.all(representation_equal(out["m2"], col[[0, 3]]))
else:
assert np.all(out["m1"] == col[[0, 3]])
assert np.all(out["m2"] == col[[0, 3]])
# Check for left, right, outer join which requires masking. Works for
        # the listed mixin classes.
if isinstance(col, (Quantity, Time, TimeDelta)):
out = table.join(t1, t2, join_type="left")
assert len(out) == 3
assert np.all(out["idx"] == [0, 1, 3])
assert np.all(out["m1"] == t1["m1"])
assert np.all(out["m2"] == t2["m2"])
check_mask(out["m1"], [False, False, False])
check_mask(out["m2"], [False, True, False])
out = table.join(t1, t2, join_type="right")
assert len(out) == 3
assert np.all(out["idx"] == [0, 2, 3])
assert np.all(out["m1"] == t1["m1"])
assert np.all(out["m2"] == t2["m2"])
check_mask(out["m1"], [False, True, False])
check_mask(out["m2"], [False, False, False])
out = table.join(t1, t2, join_type="outer")
assert len(out) == 4
assert np.all(out["idx"] == [0, 1, 2, 3])
assert np.all(out["m1"] == col)
assert np.all(out["m2"] == col)
assert check_mask(out["m1"], [False, False, True, False])
assert check_mask(out["m2"], [False, True, False, False])
else:
# Otherwise make sure it fails with the right exception message
for join_type in ("outer", "left", "right"):
with pytest.raises(NotImplementedError) as err:
table.join(t1, t2, join_type=join_type)
assert "join requires masking" in str(
err.value
) or "join unavailable" in str(err.value)
def test_cartesian_join(self, operation_table_type):
t1 = Table(rows=[(1, "a"), (2, "b")], names=["a", "b"])
t2 = Table(rows=[(3, "c"), (4, "d")], names=["a", "c"])
t12 = table.join(t1, t2, join_type="cartesian")
assert t1.colnames == ["a", "b"]
assert t2.colnames == ["a", "c"]
assert len(t12) == len(t1) * len(t2)
assert str(t12).splitlines() == [
"a_1 b a_2 c ",
"--- --- --- ---",
" 1 a 3 c",
" 1 a 4 d",
" 2 b 3 c",
" 2 b 4 d",
]
with pytest.raises(ValueError, match="cannot supply keys for a cartesian join"):
t12 = table.join(t1, t2, join_type="cartesian", keys="a")
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_join_with_join_skycoord_sky(self):
sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit="deg")
sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit="deg")
t1 = Table([sc1], names=["sc"])
t2 = Table([sc2], names=["sc"])
t12 = table.join(t1, t2, join_funcs={"sc": join_skycoord(0.2 * u.deg)})
exp = [
"sc_id sc_1 sc_2 ",
" deg,deg deg,deg ",
"----- ------- --------",
" 1 1.0,0.0 1.05,0.0",
" 1 1.1,0.0 1.05,0.0",
" 2 2.0,0.0 2.1,0.0",
]
assert str(t12).splitlines() == exp
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("distance_func", ["search_around_3d", search_around_3d])
def test_join_with_join_skycoord_3d(self, distance_func):
sc1 = SkyCoord([0, 1, 1.1, 2] * u.deg, [0, 0, 0, 0] * u.deg, [1, 1, 2, 1] * u.m)
sc2 = SkyCoord([0.5, 1.05, 2.1] * u.deg, [0, 0, 0] * u.deg, [1, 1, 1] * u.m)
t1 = Table([sc1], names=["sc"])
t2 = Table([sc2], names=["sc"])
join_func = join_skycoord(np.deg2rad(0.2) * u.m, distance_func=distance_func)
t12 = table.join(t1, t2, join_funcs={"sc": join_func})
exp = [
"sc_id sc_1 sc_2 ",
" deg,deg,m deg,deg,m ",
"----- ----------- ------------",
" 1 1.0,0.0,1.0 1.05,0.0,1.0",
" 2 2.0,0.0,1.0 2.1,0.0,1.0",
]
assert str(t12).splitlines() == exp
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_join_with_join_distance_1d(self):
c1 = [0, 1, 1.1, 2]
c2 = [0.5, 1.05, 2.1]
t1 = Table([c1], names=["col"])
t2 = Table([c2], names=["col"])
join_func = join_distance(
0.2, kdtree_args={"leafsize": 32}, query_args={"p": 2}
)
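        # Values within a distance of 0.2 are matched (1.0 and 1.1 both pair with
        # 1.05; 2.0 pairs with 2.1); unmatched rows get their own 'col_id' group
        # in the outer join, as the expected output below shows.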
t12 = table.join(t1, t2, join_type="outer", join_funcs={"col": join_func})
exp = [
"col_id col_1 col_2",
"------ ----- -----",
" 1 1.0 1.05",
" 1 1.1 1.05",
" 2 2.0 2.1",
" 3 0.0 --",
" 4 -- 0.5",
]
assert str(t12).splitlines() == exp
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_join_with_join_distance_1d_multikey(self):
from astropy.table.operations import _apply_join_funcs
c1 = [0, 1, 1.1, 1.2, 2]
id1 = [0, 1, 2, 2, 3]
o1 = ["a", "b", "c", "d", "e"]
c2 = [0.5, 1.05, 2.1]
id2 = [0, 2, 4]
o2 = ["z", "y", "x"]
t1 = Table([c1, id1, o1], names=["col", "id", "o1"])
t2 = Table([c2, id2, o2], names=["col", "id", "o2"])
join_func = join_distance(0.2)
join_funcs = {"col": join_func}
t12 = table.join(t1, t2, join_type="outer", join_funcs=join_funcs)
exp = [
"col_id col_1 id o1 col_2 o2",
"------ ----- --- --- ----- ---",
" 1 1.0 1 b -- --",
" 1 1.1 2 c 1.05 y",
" 1 1.2 2 d 1.05 y",
" 2 2.0 3 e -- --",
" 2 -- 4 -- 2.1 x",
" 3 0.0 0 a -- --",
" 4 -- 0 -- 0.5 z",
]
assert str(t12).splitlines() == exp
left, right, keys = _apply_join_funcs(t1, t2, ("col", "id"), join_funcs)
assert keys == ("col_id", "id")
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_join_with_join_distance_1d_quantity(self):
c1 = [0, 1, 1.1, 2] * u.m
c2 = [500, 1050, 2100] * u.mm
t1 = QTable([c1], names=["col"])
t2 = QTable([c2], names=["col"])
join_func = join_distance(20 * u.cm)
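        # With a 20 cm tolerance, 1.0 m matches 1050.0 mm (5 cm apart) and 2.0 m
        # matches 2100.0 mm (10 cm apart), while 0 m and 500 mm (50 cm apart) do not.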
t12 = table.join(t1, t2, join_funcs={"col": join_func})
exp = [
"col_id col_1 col_2 ",
" m mm ",
"------ ----- ------",
" 1 1.0 1050.0",
" 1 1.1 1050.0",
" 2 2.0 2100.0",
]
assert str(t12).splitlines() == exp
# Generate column name conflict
t2["col_id"] = [0, 0, 0]
t2["col__id"] = [0, 0, 0]
t12 = table.join(t1, t2, join_funcs={"col": join_func})
exp = [
"col___id col_1 col_2 col_id col__id",
" m mm ",
"-------- ----- ------ ------ -------",
" 1 1.0 1050.0 0 0",
" 1 1.1 1050.0 0 0",
" 2 2.0 2100.0 0 0",
]
assert str(t12).splitlines() == exp
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_join_with_join_distance_2d(self):
c1 = np.array([[0, 1, 1.1, 2], [0, 0, 1, 0]]).transpose()
c2 = np.array([[0.5, 1.05, 2.1], [0, 0, 0]]).transpose()
t1 = Table([c1], names=["col"])
t2 = Table([c2], names=["col"])
join_func = join_distance(
0.2, kdtree_args={"leafsize": 32}, query_args={"p": 2}
)
t12 = table.join(t1, t2, join_type="outer", join_funcs={"col": join_func})
exp = [
"col_id col_1 col_2 ",
f'{t12["col_id"].dtype.name} float64[2] float64[2]', # int32 or int64
"------ ---------- -----------",
" 1 1.0 .. 0.0 1.05 .. 0.0",
" 2 2.0 .. 0.0 2.1 .. 0.0",
" 3 0.0 .. 0.0 -- .. --",
" 4 1.1 .. 1.0 -- .. --",
" 5 -- .. -- 0.5 .. 0.0",
]
assert t12.pformat(show_dtype=True) == exp
def test_keys_left_right_basic(self):
"""Test using the keys_left and keys_right args to specify different
join keys. This takes the standard test case but renames column 'a'
to 'x' and 'y' respectively for tables 1 and 2. Then it compares the
normal join on 'a' to the new join on 'x' and 'y'."""
self._setup()
for join_type in ("inner", "left", "right", "outer"):
t1 = self.t1.copy()
t2 = self.t2.copy()
# Expected is same as joining on 'a' but with names 'x', 'y' instead
t12_exp = table.join(t1, t2, keys="a", join_type=join_type)
t12_exp.add_column(t12_exp["a"], name="x", index=1)
t12_exp.add_column(t12_exp["a"], name="y", index=len(t1.colnames) + 1)
del t12_exp["a"]
# Different key names
t1.rename_column("a", "x")
t2.rename_column("a", "y")
keys_left_list = ["x"] # Test string key name
keys_right_list = [["y"]] # Test list of string key names
if join_type == "outer":
# Just do this for the outer join (others are the same)
keys_left_list.append([t1["x"].tolist()]) # Test list key column
keys_right_list.append([t2["y"]]) # Test Column key column
for keys_left, keys_right in zip(keys_left_list, keys_right_list):
t12 = table.join(
t1,
t2,
keys_left=keys_left,
keys_right=keys_right,
join_type=join_type,
)
assert t12.colnames == t12_exp.colnames
for col in t12.values_equal(t12_exp).itercols():
assert np.all(col)
assert t12_exp.meta == t12.meta
def test_keys_left_right_exceptions(self):
"""Test exceptions using the keys_left and keys_right args to specify
different join keys.
"""
self._setup()
t1 = self.t1
t2 = self.t2
msg = r"left table does not have key column 'z'"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left="z", keys_right=["a"])
msg = r"left table has different length from key \[1, 2\]"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=[[1, 2]], keys_right=["a"])
msg = r"keys arg must be None if keys_left and keys_right are supplied"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left="z", keys_right=["a"], keys="a")
msg = r"keys_left and keys_right args must have same length"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=["a", "b"], keys_right=["a"])
msg = r"keys_left and keys_right must both be provided"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=["a", "b"])
msg = r"cannot supply join_funcs arg and keys_left / keys_right"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=["a"], keys_right=["a"], join_funcs={})
def test_join_structured_column(self):
"""Regression tests for gh-13271."""
# Two tables with matching names, including a structured column.
t1 = Table(
[
np.array([(1.0, 1), (2.0, 2)], dtype=[("f", "f8"), ("i", "i8")]),
["one", "two"],
],
names=["structured", "string"],
)
t2 = Table(
[
np.array([(2.0, 2), (4.0, 4)], dtype=[("f", "f8"), ("i", "i8")]),
["three", "four"],
],
names=["structured", "string"],
)
t12 = table.join(t1, t2, ["structured"], join_type="outer")
assert t12.pformat() == [
"structured [f, i] string_1 string_2",
"----------------- -------- --------",
" (1., 1) one --",
" (2., 2) two three",
" (4., 4) -- four",
]
class TestSetdiff:
def _setup(self, t_cls=Table):
lines1 = [" a b ", " 0 foo ", " 1 foo ", " 1 bar ", " 2 bar "]
lines2 = [" a b ", " 0 foo ", " 3 foo ", " 4 bar ", " 2 bar "]
lines3 = [
" a b d ",
" 0 foo R1",
" 8 foo R2",
" 1 bar R3",
" 4 bar R4",
]
self.t1 = t_cls.read(lines1, format="ascii")
self.t2 = t_cls.read(lines2, format="ascii")
self.t3 = t_cls.read(lines3, format="ascii")
def test_default_same_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t2)
assert type(out["a"]) is type(self.t1["a"]) # noqa: E721
assert type(out["b"]) is type(self.t1["b"]) # noqa: E721
assert out.pformat() == [" a b ", "--- ---", " 1 bar", " 1 foo"]
def test_default_same_tables(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t1)
assert type(out["a"]) is type(self.t1["a"]) # noqa: E721
assert type(out["b"]) is type(self.t1["b"]) # noqa: E721
assert out.pformat() == [
" a b ",
"--- ---",
]
def test_extra_col_left_table(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.setdiff(self.t3, self.t1)
def test_extra_col_right_table(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t3)
assert type(out["a"]) is type(self.t1["a"]) # noqa: E721
assert type(out["b"]) is type(self.t1["b"]) # noqa: E721
assert out.pformat() == [
" a b ",
"--- ---",
" 1 foo",
" 2 bar",
]
def test_keys(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t3, self.t1, keys=["a", "b"])
assert type(out["a"]) is type(self.t1["a"]) # noqa: E721
assert type(out["b"]) is type(self.t1["b"]) # noqa: E721
assert out.pformat() == [
" a b d ",
"--- --- ---",
" 4 bar R4",
" 8 foo R2",
]
def test_missing_key(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.setdiff(self.t3, self.t1, keys=["a", "d"])
class TestVStack:
def _setup(self, t_cls=Table):
self.t1 = t_cls.read(
[
" a b",
" 0. foo",
" 1. bar",
],
format="ascii",
)
self.t2 = t_cls.read(
[
" a b c",
" 2. pez 4",
" 3. sez 5",
],
format="ascii",
)
self.t3 = t_cls.read(
[
" a b",
" 4. 7",
" 5. 8",
" 6. 9",
],
format="ascii",
)
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)]))
self.t2.meta.update(OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]))
self.t4.meta.update(OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)]))
self.t5.meta.update(OrderedDict([("b", 3), ("c", "k"), ("d", 1)]))
self.meta_merge = OrderedDict(
[
("b", [1, 2, 3, 4, 5, 6]),
("c", {"a": 1, "b": 1, "c": 1}),
("d", 1),
("a", 1),
("e", 1),
]
)
def test_validate_join_type(self):
self._setup()
with pytest.raises(TypeError, match="Did you accidentally call vstack"):
table.vstack(self.t1, self.t2)
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2[1]])
assert type(out["a"]) is type(self.t1["a"]) # noqa: E721
assert type(out["b"]) is type(self.t1["b"]) # noqa: E721
assert out.pformat() == [
" a b ",
"--- ---",
"0.0 foo",
"1.0 bar",
"1.0 bar",
]
def test_stack_table_column(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2["a"]])
assert out.masked is False
assert out.pformat() == [
" a b ",
"--- ---",
"0.0 foo",
"1.0 bar",
"0.0 --",
"1.0 --",
]
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.vstack([self.t1, self.t2, self.t4], join_type="inner")
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.vstack([self.t1, self.t5], join_type="inner")
assert len(w) == 2
assert out.meta == self.t5.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.vstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="warn"
)
assert len(w) == 2
assert out.meta == self.t5.meta
out = table.vstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="silent"
)
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.vstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="error"
)
with pytest.raises(ValueError):
out = table.vstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="nonsense"
)
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.vstack([])
with pytest.raises(TypeError):
table.vstack(1)
with pytest.raises(TypeError):
table.vstack([self.t2, 1])
with pytest.raises(ValueError):
table.vstack([self.t1, self.t2], join_type="invalid join type")
def test_stack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type="inner")
assert t12.masked is False
assert type(t12) is operation_table_type
assert type(t12["a"]) is type(t1["a"]) # noqa: E721
assert type(t12["b"]) is type(t1["b"]) # noqa: E721
assert t12.pformat() == [
" a b ",
"--- ---",
"0.0 foo",
"1.0 bar",
"2.0 pez",
"3.0 sez",
]
t124 = table.vstack([t1, t2, t4], join_type="inner")
assert type(t124) is operation_table_type
assert type(t12["a"]) is type(t1["a"]) # noqa: E721
assert type(t12["b"]) is type(t1["b"]) # noqa: E721
assert t124.pformat() == [
" a b ",
"--- ---",
"0.0 foo",
"1.0 bar",
"2.0 pez",
"3.0 sez",
"0.0 foo",
"1.0 bar",
]
def test_stack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type="outer")
assert t12.masked is False
assert t12.pformat() == [
" a b c ",
"--- --- ---",
"0.0 foo --",
"1.0 bar --",
"2.0 pez 4",
"3.0 sez 5",
]
t124 = table.vstack([t1, t2, t4], join_type="outer")
assert t124.masked is False
assert t124.pformat() == [
" a b c ",
"--- --- ---",
"0.0 foo --",
"1.0 bar --",
"2.0 pez 4",
"3.0 sez 5",
"0.0 foo --",
"1.0 bar --",
]
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type="inner")
assert "The 'b' columns have incompatible types: {}".format(
[self.t1["b"].dtype.name, self.t3["b"].dtype.name]
) in str(excinfo.value)
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type="outer")
assert "The 'b' columns have incompatible types:" in str(excinfo.value)
with pytest.raises(TableMergeError):
table.vstack([self.t1, self.t2], join_type="exact")
t1_reshape = self.t1.copy()
t1_reshape["b"].shape = [2, 1]
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, t1_reshape])
assert "have different shape" in str(excinfo.value)
def test_vstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t4 = self.t4
t4["b"].mask[1] = True
t14 = table.vstack([t1, t4])
assert t14.masked is False
assert t14.pformat() == [
" a b ",
"--- ---",
"0.0 foo",
"1.0 bar",
"0.0 foo",
"1.0 --",
]
def test_col_meta_merge_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
        # Key col 'a', should take last value ('km')
t1["a"].info.unit = "cm"
t2["a"].info.unit = "m"
t4["a"].info.unit = "km"
# Key col 'a' format should take last when all match
t1["a"].info.format = "%f"
t2["a"].info.format = "%f"
t4["a"].info.format = "%f"
# Key col 'b', take first value 't1_b'
t1["b"].info.description = "t1_b"
# Key col 'b', take first non-empty value '%6s'
t4["b"].info.format = "%6s"
# Key col 'a', should be merged meta
t1["a"].info.meta.update(
OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])
)
t2["a"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
t4["a"].info.meta.update(
OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)])
)
# Key col 'b', should be meta2
t2["b"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
if operation_table_type is Table:
ctx = pytest.warns(metadata.MergeConflictWarning)
else:
ctx = nullcontext()
with ctx as warning_lines:
out = table.vstack([t1, t2, t4], join_type="inner")
if operation_table_type is Table:
assert len(warning_lines) == 2
assert (
"In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message)
)
assert (
"In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message)
)
# Check units are suitably ignored for a regular Table
assert out.pformat() == [
" a b ",
" km ",
"-------- ------",
"0.000000 foo",
"1.000000 bar",
"2.000000 pez",
"3.000000 sez",
"0.000000 foo",
"1.000000 bar",
]
else:
# Check QTable correctly dealt with units.
assert out.pformat() == [
" a b ",
" km ",
"-------- ------",
"0.000000 foo",
"0.000010 bar",
"0.002000 pez",
"0.003000 sez",
"0.000000 foo",
"1.000000 bar",
]
assert out["a"].info.unit == "km"
assert out["a"].info.format == "%f"
assert out["b"].info.description == "t1_b"
assert out["b"].info.format == "%6s"
assert out["a"].info.meta == self.meta_merge
assert out["b"].info.meta == OrderedDict(
[("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]
)
def test_col_meta_merge_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
        # Key col 'a', should take last value ('km')
t1["a"].unit = "cm"
t2["a"].unit = "m"
t4["a"].unit = "km"
# Key col 'a' format should take last when all match
t1["a"].info.format = "%0d"
t2["a"].info.format = "%0d"
t4["a"].info.format = "%0d"
# Key col 'b', take first value 't1_b'
t1["b"].info.description = "t1_b"
# Key col 'b', take first non-empty value '%6s'
t4["b"].info.format = "%6s"
# Key col 'a', should be merged meta
t1["a"].info.meta.update(
OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])
)
t2["a"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
t4["a"].info.meta.update(
OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)])
)
# Key col 'b', should be meta2
t2["b"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
# All these should pass through
t2["c"].unit = "m"
t2["c"].info.format = "%6s"
t2["c"].info.description = "t2_c"
with pytest.warns(metadata.MergeConflictWarning) as warning_lines:
out = table.vstack([t1, t2, t4], join_type="outer")
assert len(warning_lines) == 2
assert (
"In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message)
)
assert (
"In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message)
)
assert out["a"].unit == "km"
assert out["a"].info.format == "%0d"
assert out["b"].info.description == "t1_b"
assert out["b"].info.format == "%6s"
assert out["a"].info.meta == self.meta_merge
assert out["b"].info.meta == OrderedDict(
[("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]
)
assert out["c"].info.unit == "m"
assert out["c"].info.format == "%6s"
assert out["c"].info.description == "t2_c"
def test_vstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        self._setup(operation_table_type)
assert (self.t1 == table.vstack(self.t1)).all()
assert (self.t1 == table.vstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols["m"]
len_col = len(col)
t = table.QTable([col], names=["a"])
cls_name = type(col).__name__
# Vstack works for these classes:
if isinstance(
col,
(
u.Quantity,
Time,
TimeDelta,
SkyCoord,
EarthLocation,
BaseRepresentationOrDifferential,
),
):
out = table.vstack([t, t])
assert len(out) == len_col * 2
if cls_name == "SkyCoord":
# Argh, SkyCoord needs __eq__!!
assert skycoord_equal(out["a"][len_col:], col)
assert skycoord_equal(out["a"][:len_col], col)
elif "Repr" in cls_name or "Diff" in cls_name:
assert np.all(representation_equal(out["a"][:len_col], col))
assert np.all(representation_equal(out["a"][len_col:], col))
else:
assert np.all(out["a"][:len_col] == col)
assert np.all(out["a"][len_col:] == col)
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t])
assert "vstack unavailable for mixin column type(s): {}".format(
cls_name
) in str(err.value)
        # Check for outer stack which requires masking. Only Time, TimeDelta,
        # and Quantity support this currently.
t2 = table.QTable([col], names=["b"]) # different from col name for t
if isinstance(col, (Time, TimeDelta, Quantity)):
out = table.vstack([t, t2], join_type="outer")
assert len(out) == len_col * 2
assert np.all(out["a"][:len_col] == col)
assert np.all(out["b"][len_col:] == col)
assert check_mask(out["a"], [False] * len_col + [True] * len_col)
assert check_mask(out["b"], [True] * len_col + [False] * len_col)
# check directly stacking mixin columns:
out2 = table.vstack([t, t2["b"]])
assert np.all(out["a"] == out2["a"])
assert np.all(out["b"] == out2["b"])
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t2], join_type="outer")
assert "vstack requires masking" in str(
err.value
) or "vstack unavailable" in str(err.value)
def test_vstack_different_representation(self):
"""Test that representations can be mixed together."""
rep1 = CartesianRepresentation([1, 2] * u.km, [3, 4] * u.km, 1 * u.km)
rep2 = SphericalRepresentation([0] * u.deg, [0] * u.deg, 10 * u.km)
t1 = Table([rep1])
t2 = Table([rep2])
t12 = table.vstack([t1, t2])
expected = CartesianRepresentation(
[1, 2, 10] * u.km, [3, 4, 0] * u.km, [1, 1, 0] * u.km
)
assert np.all(representation_equal(t12["col0"], expected))
rep3 = UnitSphericalRepresentation([0] * u.deg, [0] * u.deg)
t3 = Table([rep3])
with pytest.raises(ValueError, match="representations are inconsistent"):
table.vstack([t1, t3])
def test_vstack_structured_column(self):
"""Regression tests for gh-13271."""
# Two tables with matching names, including a structured column.
t1 = Table(
[
np.array([(1.0, 1), (2.0, 2)], dtype=[("f", "f8"), ("i", "i8")]),
["one", "two"],
],
names=["structured", "string"],
)
t2 = Table(
[
np.array([(3.0, 3), (4.0, 4)], dtype=[("f", "f8"), ("i", "i8")]),
["three", "four"],
],
names=["structured", "string"],
)
t12 = table.vstack([t1, t2])
assert t12.pformat() == [
"structured [f, i] string",
"----------------- ------",
" (1., 1) one",
" (2., 2) two",
" (3., 3) three",
" (4., 4) four",
]
# One table without the structured column.
t3 = t2[("string",)]
t13 = table.vstack([t1, t3])
assert t13.pformat() == [
"structured [f, i] string",
"----------------- ------",
" (1.0, 1) one",
" (2.0, 2) two",
" -- three",
" -- four",
]
class TestDStack:
def _setup(self, t_cls=Table):
self.t1 = t_cls.read(
[
" a b",
" 0. foo",
" 1. bar",
],
format="ascii",
)
self.t2 = t_cls.read(
[
" a b c",
" 2. pez 4",
" 3. sez 5",
],
format="ascii",
)
self.t2["d"] = Time([1, 2], format="cxcsec")
self.t3 = t_cls(
{
"a": [[5.0, 6.0], [4.0, 3.0]],
"b": [["foo", "bar"], ["pez", "sez"]],
},
names=("a", "b"),
)
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
self.t5 = t_cls(
{
"a": [[4.0, 2.0], [1.0, 6.0]],
"b": [["foo", "pez"], ["bar", "sez"]],
},
names=("a", "b"),
)
self.t6 = t_cls.read(
[
" a b c",
" 7. pez 2",
" 4. sez 6",
" 6. foo 3",
],
format="ascii",
)
def test_validate_join_type(self):
self._setup()
with pytest.raises(TypeError, match="Did you accidentally call dstack"):
table.dstack(self.t1, self.t2)
@staticmethod
def compare_dstack(tables, out):
for ii, tbl in enumerate(tables):
for name, out_col in out.columns.items():
if name in tbl.colnames:
# Columns always compare equal
assert np.all(tbl[name] == out[name][:, ii])
# If input has a mask then output must have same mask
if hasattr(tbl[name], "mask"):
assert np.all(tbl[name].mask == out[name].mask[:, ii])
# If input has no mask then output might have a mask (if other table
# is missing that column). If so then all mask values should be False.
elif hasattr(out[name], "mask"):
assert not np.any(out[name].mask[:, ii])
else:
# Column missing for this table, out must have a mask with all True.
assert np.all(out[name].mask[:, ii])
def test_dstack_table_column(self, operation_table_type):
"""Stack a table with 3 cols and one column (gets auto-converted to Table)."""
self._setup(operation_table_type)
t2 = self.t1.copy()
out = table.dstack([self.t1, t2["a"]])
self.compare_dstack([self.t1, t2[("a",)]], out)
def test_dstack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t4["a"].mask[0] = True
# Test for non-masked table
t12 = table.dstack([t1, t2], join_type="outer")
assert type(t12) is operation_table_type
assert type(t12["a"]) is type(t1["a"]) # noqa: E721
assert type(t12["b"]) is type(t1["b"]) # noqa: E721
self.compare_dstack([t1, t2], t12)
# Test for masked table
t124 = table.dstack([t1, t2, t4], join_type="outer")
assert type(t124) is operation_table_type
assert type(t124["a"]) is type(t4["a"]) # noqa: E721
assert type(t124["b"]) is type(t4["b"]) # noqa: E721
self.compare_dstack([t1, t2, t4], t124)
def test_dstack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
# Test for masked table
t124 = table.dstack([t1, t2, t4], join_type="inner")
assert type(t124) is operation_table_type
assert type(t124["a"]) is type(t4["a"]) # noqa: E721
assert type(t124["b"]) is type(t4["b"]) # noqa: E721
self.compare_dstack([t1, t2, t4], t124)
def test_dstack_multi_dimension_column(self, operation_table_type):
self._setup(operation_table_type)
t3 = self.t3
t5 = self.t5
t2 = self.t2
t35 = table.dstack([t3, t5])
assert type(t35) is operation_table_type
assert type(t35["a"]) is type(t3["a"]) # noqa: E721
assert type(t35["b"]) is type(t3["b"]) # noqa: E721
self.compare_dstack([t3, t5], t35)
with pytest.raises(TableMergeError):
table.dstack([t2, t3])
def test_dstack_different_length_table(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t2
t6 = self.t6
with pytest.raises(ValueError):
table.dstack([t2, t6])
def test_dstack_single_table(self):
self._setup(Table)
out = table.dstack(self.t1)
assert np.all(out == self.t1)
def test_dstack_representation(self):
rep1 = SphericalRepresentation([1, 2] * u.deg, [3, 4] * u.deg, 1 * u.kpc)
rep2 = SphericalRepresentation([10, 20] * u.deg, [30, 40] * u.deg, 10 * u.kpc)
t1 = Table([rep1])
t2 = Table([rep2])
t12 = table.dstack([t1, t2])
assert np.all(representation_equal(t12["col0"][:, 0], rep1))
assert np.all(representation_equal(t12["col0"][:, 1], rep2))
def test_dstack_skycoord(self):
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg)
t1 = Table([sc1])
t2 = Table([sc2])
t12 = table.dstack([t1, t2])
assert skycoord_equal(sc1, t12["col0"][:, 0])
assert skycoord_equal(sc2, t12["col0"][:, 1])
def test_dstack_structured_column(self):
"""Regression tests for gh-13271."""
# Two tables with matching names, including a structured column.
t1 = Table(
[
np.array([(1.0, 1), (2.0, 2)], dtype=[("f", "f8"), ("i", "i8")]),
["one", "two"],
],
names=["structured", "string"],
)
t2 = Table(
[
np.array([(3.0, 3), (4.0, 4)], dtype=[("f", "f8"), ("i", "i8")]),
["three", "four"],
],
names=["structured", "string"],
)
t12 = table.dstack([t1, t2])
assert t12.pformat() == [
"structured [f, i] string ",
"------------------ ------------",
"(1., 1) .. (3., 3) one .. three",
"(2., 2) .. (4., 4) two .. four",
]
# One table without the structured column.
t3 = t2[("string",)]
t13 = table.dstack([t1, t3])
assert t13.pformat() == [
"structured [f, i] string ",
"----------------- ------------",
" (1.0, 1) .. -- one .. three",
" (2.0, 2) .. -- two .. four",
]
class TestHStack:
def _setup(self, t_cls=Table):
self.t1 = t_cls.read(
[
" a b",
" 0. foo",
" 1. bar",
],
format="ascii",
)
self.t2 = t_cls.read(
[
" a b c",
" 2. pez 4",
" 3. sez 5",
],
format="ascii",
)
self.t3 = t_cls.read(
[
" d e",
" 4. 7",
" 5. 8",
" 6. 9",
],
format="ascii",
)
self.t4 = t_cls(self.t1, copy=True, masked=True)
self.t4["a"].name = "f"
self.t4["b"].name = "g"
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)]))
self.t2.meta.update(OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)]))
self.t4.meta.update(OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)]))
self.t5.meta.update(OrderedDict([("b", 3), ("c", "k"), ("d", 1)]))
self.meta_merge = OrderedDict(
[
("b", [1, 2, 3, 4, 5, 6]),
("c", {"a": 1, "b": 1, "c": 1}),
("d", 1),
("a", 1),
("e", 1),
]
)
def test_validate_join_type(self):
self._setup()
with pytest.raises(TypeError, match="Did you accidentally call hstack"):
table.hstack(self.t1, self.t2)
def test_stack_same_table(self, operation_table_type):
"""
From #2995, test that hstack'ing references to the same table has the
expected output.
"""
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t1])
assert out.masked is False
assert out.pformat() == [
"a_1 b_1 a_2 b_2",
"--- --- --- ---",
"0.0 foo 0.0 foo",
"1.0 bar 1.0 bar",
]
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1[0], self.t2[1]])
assert out.masked is False
assert out.pformat() == [
"a_1 b_1 a_2 b_2 c ",
"--- --- --- --- ---",
"0.0 foo 3.0 sez 5",
]
def test_stack_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2["c"]])
assert type(out["a"]) is type(self.t1["a"]) # noqa: E721
assert type(out["b"]) is type(self.t1["b"]) # noqa: E721
assert type(out["c"]) is type(self.t2["c"]) # noqa: E721
assert out.pformat() == [
" a b c ",
"--- --- ---",
"0.0 foo 4",
"1.0 bar 5",
]
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2, self.t4], join_type="inner")
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.hstack([self.t1, self.t5], join_type="inner")
assert len(w) == 2
assert out.meta == self.t5.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.hstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="warn"
)
assert len(w) == 2
assert out.meta == self.t5.meta
out = table.hstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="silent"
)
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.hstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="error"
)
with pytest.raises(ValueError):
out = table.hstack(
[self.t1, self.t5], join_type="inner", metadata_conflicts="nonsense"
)
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.hstack([])
with pytest.raises(TypeError):
table.hstack(1)
with pytest.raises(TypeError):
table.hstack([self.t2, 1])
with pytest.raises(ValueError):
table.hstack([self.t1, self.t2], join_type="invalid join type")
def test_stack_basic(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t3 = self.t3
t4 = self.t4
out = table.hstack([t1, t2], join_type="inner")
assert out.masked is False
assert type(out) is operation_table_type
assert type(out["a_1"]) is type(t1["a"]) # noqa: E721
assert type(out["b_1"]) is type(t1["b"]) # noqa: E721
assert type(out["a_2"]) is type(t2["a"]) # noqa: E721
assert type(out["b_2"]) is type(t2["b"]) # noqa: E721
assert out.pformat() == [
"a_1 b_1 a_2 b_2 c ",
"--- --- --- --- ---",
"0.0 foo 2.0 pez 4",
"1.0 bar 3.0 sez 5",
]
# stacking as a list gives same result
out_list = table.hstack([t1, t2], join_type="inner")
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2], join_type="outer")
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2, t3, t4], join_type="outer")
assert out.masked is False
assert out.pformat() == [
"a_1 b_1 a_2 b_2 c d e f g ",
"--- --- --- --- --- --- --- --- ---",
"0.0 foo 2.0 pez 4 4.0 7 0.0 foo",
"1.0 bar 3.0 sez 5 5.0 8 1.0 bar",
" -- -- -- -- -- 6.0 9 -- --",
]
out = table.hstack([t1, t2, t3, t4], join_type="inner")
assert out.masked is False
assert out.pformat() == [
"a_1 b_1 a_2 b_2 c d e f g ",
"--- --- --- --- --- --- --- --- ---",
"0.0 foo 2.0 pez 4 4.0 7 0.0 foo",
"1.0 bar 3.0 sez 5 5.0 8 1.0 bar",
]
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
# For join_type exact, which will fail here because n_rows
# does not match
with pytest.raises(TableMergeError):
table.hstack([self.t1, self.t3], join_type="exact")
def test_hstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
            pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = operation_table_type(t1, copy=True, masked=True)
t2.meta.clear()
t2["b"].mask[1] = True
out = table.hstack([t1, t2])
assert out.pformat() == [
"a_1 b_1 a_2 b_2",
"--- --- --- ---",
"0.0 foo 0.0 foo",
"1.0 bar 1.0 --",
]
def test_table_col_rename(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack(
[self.t1, self.t2],
join_type="inner",
uniq_col_name="{table_name}_{col_name}",
table_names=("left", "right"),
)
assert out.masked is False
assert out.pformat() == [
"left_a left_b right_a right_b c ",
"------ ------ ------- ------- ---",
" 0.0 foo 2.0 pez 4",
" 1.0 bar 3.0 sez 5",
]
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t3 = self.t3[:2]
t4 = self.t4
# Just set a bunch of meta and make sure it is the same in output
meta1 = OrderedDict([("b", [1, 2]), ("c", {"a": 1}), ("d", 1)])
t1["a"].unit = "cm"
t1["b"].info.description = "t1_b"
t4["f"].info.format = "%6s"
t1["b"].info.meta.update(meta1)
t3["d"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
t4["g"].info.meta.update(
OrderedDict([("b", [5, 6]), ("c", {"c": 1}), ("e", 1)])
)
t3["e"].info.meta.update(
OrderedDict([("b", [3, 4]), ("c", {"b": 1}), ("a", 1)])
)
t3["d"].unit = "m"
t3["d"].info.format = "%6s"
t3["d"].info.description = "t3_c"
out = table.hstack([t1, t3, t4], join_type="exact")
for t in [t1, t3, t4]:
for name in t.colnames:
for attr in ("meta", "unit", "format", "description"):
assert getattr(out[name].info, attr) == getattr(t[name].info, attr)
# Make sure we got a copy of meta, not ref
t1["b"].info.meta["b"] = None
assert out["b"].info.meta["b"] == [1, 2]
def test_hstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        self._setup(operation_table_type)
assert (self.t1 == table.hstack(self.t1)).all()
assert (self.t1 == table.hstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col1 = mixin_cols["m"]
col2 = col1[2:4] # Shorter version of col1
t1 = table.QTable([col1])
t2 = table.QTable([col2])
cls_name = type(col1).__name__
out = table.hstack([t1, t2], join_type="inner")
assert type(out["col0_1"]) is type(out["col0_2"]) # noqa: E721
assert len(out) == len(col2)
# Check that columns are as expected.
if cls_name == "SkyCoord":
assert skycoord_equal(out["col0_1"], col1[: len(col2)])
assert skycoord_equal(out["col0_2"], col2)
elif "Repr" in cls_name or "Diff" in cls_name:
assert np.all(representation_equal(out["col0_1"], col1[: len(col2)]))
assert np.all(representation_equal(out["col0_2"], col2))
else:
assert np.all(out["col0_1"] == col1[: len(col2)])
assert np.all(out["col0_2"] == col2)
        # Time, TimeDelta and Quantity support masking; the other mixins do not
if isinstance(col1, (Time, TimeDelta, Quantity)):
out = table.hstack([t1, t2], join_type="outer")
assert len(out) == len(t1)
assert np.all(out["col0_1"] == col1)
assert np.all(out["col0_2"][: len(col2)] == col2)
assert check_mask(out["col0_2"], [False, False, True, True])
# check directly stacking mixin columns:
out2 = table.hstack([t1, t2["col0"]], join_type="outer")
assert np.all(out["col0_1"] == out2["col0_1"])
assert np.all(out["col0_2"] == out2["col0_2"])
else:
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type="outer")
assert "hstack requires masking" in str(err.value)
def test_unique(operation_table_type):
t = operation_table_type.read(
[
" a b c d",
" 2 b 7.0 0",
" 1 c 3.0 5",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 1 a 1.0 7",
" 2 b 5.0 1",
" 0 a 0.0 4",
" 1 a 2.0 6",
" 1 c 3.0 5",
],
format="ascii",
)
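    # The last row of ``t`` duplicates " 1   c 3.0   5" above, so the expected
    # unique table is simply ``t`` without that final row, sorted.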
tu = operation_table_type(np.sort(t[:-1]))
t_all = table.unique(t)
assert sort_eq(t_all.pformat(), tu.pformat())
t_s = t.copy()
del t_s["b", "c", "d"]
t_all = table.unique(t_s)
assert sort_eq(
t_all.pformat(),
[
" a ",
"---",
" 0",
" 1",
" 2",
],
)
key1 = "a"
t1a = table.unique(t, key1)
assert sort_eq(
t1a.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 1 c 3.0 5",
" 2 b 7.0 0",
],
)
t1b = table.unique(t, key1, keep="last")
assert sort_eq(
t1b.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 1 c 3.0 5",
" 2 b 5.0 1",
],
)
t1c = table.unique(t, key1, keep="none")
assert sort_eq(
t1c.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
],
)
key2 = ["a", "b"]
t2a = table.unique(t, key2)
assert sort_eq(
t2a.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 1 a 1.0 7",
" 1 c 3.0 5",
" 2 a 4.0 3",
" 2 b 7.0 0",
],
)
t2b = table.unique(t, key2, keep="last")
assert sort_eq(
t2b.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 1 a 2.0 6",
" 1 c 3.0 5",
" 2 a 4.0 3",
" 2 b 5.0 1",
],
)
t2c = table.unique(t, key2, keep="none")
assert sort_eq(
t2c.pformat(),
[
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 2 a 4.0 3",
],
)
key2 = ["a", "a"]
with pytest.raises(ValueError) as exc:
t2a = table.unique(t, key2)
assert exc.value.args[0] == "duplicate key names"
with pytest.raises(ValueError) as exc:
table.unique(t, key2, keep=True)
assert exc.value.args[0] == "'keep' should be one of 'first', 'last', 'none'"
t1_m = operation_table_type(t1a, masked=True)
t1_m["a"].mask[1] = True
with pytest.raises(ValueError) as exc:
t1_mu = table.unique(t1_m)
assert (
exc.value.args[0] == "cannot use columns with masked values as keys; "
"remove column 'a' from keys and rerun unique()"
)
t1_mu = table.unique(t1_m, silent=True)
assert t1_mu.masked is False
assert t1_mu.pformat() == [
" a b c d ",
"--- --- --- ---",
" 0 a 0.0 4",
" 2 b 7.0 0",
" -- c 3.0 5",
]
with pytest.raises(ValueError):
t1_mu = table.unique(t1_m, silent=True, keys="a")
t1_m = operation_table_type(t, masked=True)
t1_m["a"].mask[1] = True
t1_m["d"].mask[3] = True
# Test that multiple masked key columns get removed in the correct
# order
t1_mu = table.unique(t1_m, keys=["d", "a", "b"], silent=True)
assert t1_mu.masked is False
assert t1_mu.pformat() == [
" a b c d ",
"--- --- --- ---",
" 2 a 4.0 --",
" 2 b 7.0 0",
" -- c 3.0 5",
]
def test_vstack_bytes(operation_table_type):
"""
Test for issue #5617 when vstack'ing bytes columns in Py3.
    This is really an upstream numpy issue numpy/numpy#8403.
"""
t = operation_table_type([[b"a"]], names=["a"])
assert t["a"].itemsize == 1
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2["a"].itemsize == 1
def test_vstack_unicode():
"""
Test for problem related to issue #5617 when vstack'ing *unicode*
columns. In this case the character size gets multiplied by 4.
"""
t = table.Table([["a"]], names=["a"])
assert t["a"].itemsize == 4 # 4-byte / char for U dtype
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2["a"].itemsize == 4
def test_join_mixins_time_quantity():
"""
Test for table join using non-ndarray key columns.
"""
tm1 = Time([2, 1, 2], format="cxcsec")
q1 = [2, 1, 1] * u.m
idx1 = [1, 2, 3]
tm2 = Time([2, 3], format="cxcsec")
q2 = [2, 3] * u.m
idx2 = [10, 20]
t1 = Table([tm1, q1, idx1], names=["tm", "q", "idx"])
t2 = Table([tm2, q2, idx2], names=["tm", "q", "idx"])
# Output:
#
# <Table length=4>
# tm q idx_1 idx_2
# m
# object float64 int64 int64
# ------------------ ------- ----- -----
# 0.9999999999969589 1.0 2 --
# 2.00000000000351 1.0 3 --
# 2.00000000000351 2.0 1 10
# 3.000000000000469 3.0 -- 20
t12 = table.join(t1, t2, join_type="outer", keys=["tm", "q"])
# Key cols are lexically sorted
assert np.all(t12["tm"] == Time([1, 2, 2, 3], format="cxcsec"))
assert np.all(t12["q"] == [1, 1, 2, 3] * u.m)
assert np.all(t12["idx_1"] == np.ma.array([2, 3, 1, 0], mask=[0, 0, 0, 1]))
assert np.all(t12["idx_2"] == np.ma.array([0, 0, 10, 20], mask=[1, 1, 0, 0]))
def test_join_mixins_not_sortable():
"""
Test for table join using non-ndarray key columns that are not sortable.
"""
sc = SkyCoord([1, 2], [3, 4], unit="deg,deg")
t1 = Table([sc, [1, 2]], names=["sc", "idx1"])
t2 = Table([sc, [10, 20]], names=["sc", "idx2"])
with pytest.raises(TypeError, match="one or more key columns are not sortable"):
table.join(t1, t2, keys="sc")
def test_join_non_1d_key_column():
c1 = [[1, 2], [3, 4]]
c2 = [1, 2]
t1 = Table([c1, c2], names=["a", "b"])
t2 = t1.copy()
with pytest.raises(ValueError, match="key column 'a' must be 1-d"):
table.join(t1, t2, keys="a")
def test_argsort_time_column():
"""Regression test for #10823."""
times = Time(["2016-01-01", "2018-01-01", "2017-01-01"])
t = Table([times], names=["time"])
i = t.argsort("time")
assert np.all(i == times.argsort())
def test_sort_indexed_table():
"""Test fix for #9473 and #6545 - and another regression test for #10823."""
t = Table([[1, 3, 2], [6, 4, 5]], names=("a", "b"))
t.add_index("a")
t.sort("a")
assert np.all(t["a"] == [1, 2, 3])
assert np.all(t["b"] == [6, 5, 4])
t.sort("b")
assert np.all(t["b"] == [4, 5, 6])
assert np.all(t["a"] == [3, 2, 1])
times = ["2016-01-01", "2018-01-01", "2017-01-01"]
tm = Time(times)
t2 = Table([tm, [3, 2, 1]], names=["time", "flux"])
t2.sort("flux")
assert np.all(t2["flux"] == [1, 2, 3])
t2.sort("time")
assert np.all(t2["flux"] == [3, 1, 2])
assert np.all(t2["time"] == tm[[0, 2, 1]])
# Using the table as a TimeSeries implicitly sets the index, so
# this test is a bit different from the above.
from astropy.timeseries import TimeSeries
ts = TimeSeries(time=times)
ts["flux"] = [3, 2, 1]
ts.sort("flux")
assert np.all(ts["flux"] == [1, 2, 3])
ts.sort("time")
assert np.all(ts["flux"] == [3, 1, 2])
assert np.all(ts["time"] == tm[[0, 2, 1]])
def test_get_out_class():
c = table.Column([1, 2])
mc = table.MaskedColumn([1, 2])
q = [1, 2] * u.m
assert _get_out_class([c, mc]) is mc.__class__
assert _get_out_class([mc, c]) is mc.__class__
assert _get_out_class([c, c]) is c.__class__
assert _get_out_class([c]) is c.__class__
with pytest.raises(ValueError):
_get_out_class([c, q])
with pytest.raises(ValueError):
_get_out_class([q, c])
def test_masking_required_exception():
"""
Test that outer join, hstack and vstack fail for a mixin column which
does not support masking.
"""
col = table.NdarrayMixin([0, 1, 2, 3])
t1 = table.QTable([[1, 2, 3, 4], col], names=["a", "b"])
t2 = table.QTable([[1, 2], col[:2]], names=["a", "c"])
with pytest.raises(NotImplementedError) as err:
table.vstack([t1, t2], join_type="outer")
assert "vstack unavailable" in str(err.value)
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type="outer")
assert "hstack requires masking" in str(err.value)
with pytest.raises(NotImplementedError) as err:
table.join(t1, t2, join_type="outer")
assert "join requires masking" in str(err.value)
def test_stack_columns():
c = table.Column([1, 2])
mc = table.MaskedColumn([1, 2])
q = [1, 2] * u.m
time = Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"])
sc = SkyCoord([1, 2], [3, 4], unit="deg")
cq = table.Column([11, 22], unit=u.m)
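    # Summary of the behavior asserted below: a stack involving a Quantity comes
    # back as a QTable; Time or SkyCoord mixins alone do not force a QTable; and
    # vstack-ing a plain Column (even one carrying a unit) with a Quantity raises.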
t = table.hstack([c, q])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([q, c])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([mc, q])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([c, mc])
assert t.__class__ is table.Table
assert t.masked is False
t = table.vstack([q, q])
assert t.__class__ is table.QTable
t = table.vstack([c, c])
assert t.__class__ is table.Table
t = table.hstack([c, time])
assert t.__class__ is table.Table
t = table.hstack([c, sc])
assert t.__class__ is table.Table
t = table.hstack([q, time, sc])
assert t.__class__ is table.QTable
with pytest.raises(ValueError):
table.vstack([c, q])
with pytest.raises(ValueError):
t = table.vstack([q, cq])
def test_mixin_join_regression():
# This used to trigger a ValueError:
# ValueError: NumPy boolean array indexing assignment cannot assign
# 6 input values to the 4 output values where the mask is true
t1 = QTable()
t1["index"] = [1, 2, 3, 4, 5]
t1["flux1"] = [2, 3, 2, 1, 1] * u.Jy
t1["flux2"] = [2, 3, 2, 1, 1] * u.Jy
t2 = QTable()
t2["index"] = [3, 4, 5, 6]
t2["flux1"] = [2, 1, 1, 3] * u.Jy
t2["flux2"] = [2, 1, 1, 3] * u.Jy
t12 = table.join(t1, t2, keys=("index", "flux1", "flux2"), join_type="outer")
assert len(t12) == 6
|
1b14f29d9a48874bee75673d6646a3b1a6d6e11f960890955d023c6418617e34 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import warnings
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import table, time
from astropy import units as u
from astropy.tests.helper import assert_follows_unicode_guidelines
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestColumn:
def test_subclass(self, Column):
c = Column(name="a")
assert isinstance(c, np.ndarray)
c2 = c * 2
assert isinstance(c2, Column)
assert isinstance(c2, np.ndarray)
def test_numpy_ops(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name="a")
for op, test_equal in (
(operator.eq, True),
(operator.ne, False),
(operator.ge, True),
(operator.gt, False),
(operator.le, True),
(operator.lt, False),
):
for eq in (op(c, arr), op(arr, c)):
assert np.all(eq) if test_equal else not np.any(eq)
assert len(eq) == 3
if Column is table.Column:
assert type(eq) == np.ndarray
else:
assert type(eq) == np.ma.core.MaskedArray
assert eq.dtype.str == "|b1"
lt = c - 1 < arr
assert np.all(lt)
def test_numpy_boolean_ufuncs(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name="a")
for ufunc, test_true in (
(np.isfinite, True),
(np.isinf, False),
(np.isnan, False),
(np.sign, True),
(np.signbit, False),
):
result = ufunc(c)
assert len(result) == len(c)
assert np.all(result) if test_true else not np.any(result)
if Column is table.Column:
assert type(result) == np.ndarray
else:
assert type(result) == np.ma.core.MaskedArray
if ufunc is not np.sign:
assert result.dtype.str == "|b1"
def test_view(self, Column):
c = np.array([1, 2, 3], dtype=np.int64).view(Column)
assert repr(c) == f"<{Column.__name__} dtype='int64' length=3>\n1\n2\n3"
def test_format(self, Column):
"""Show that the formatted output from str() works"""
from astropy import conf
with conf.set_temp("max_lines", 8):
c1 = Column(np.arange(2000), name="a", dtype=float, format="%6.2f")
assert str(c1).splitlines() == [
" a ",
"-------",
" 0.00",
" 1.00",
" ...",
"1998.00",
"1999.00",
"Length = 2000 rows",
]
def test_convert_numpy_array(self, Column):
d = Column([1, 2, 3], name="a", dtype="i8")
np_data = np.array(d)
assert np.all(np_data == d)
np_data = np.array(d, copy=False)
assert np.all(np_data == d)
np_data = np.array(d, dtype="i4")
assert np.all(np_data == d)
def test_convert_unit(self, Column):
d = Column([1, 2, 3], name="a", dtype="f8", unit="m")
d.convert_unit_to("km")
assert np.all(d.data == [0.001, 0.002, 0.003])
def test_array_wrap(self):
"""Test that the __array_wrap__ method converts a reduction ufunc
output that has a different shape into an ndarray view. Without this a
method call like c.mean() returns a Column array object with length=1."""
# Mean and sum for a 1-d float column
c = table.Column(name="a", data=[1.0, 2.0, 3.0])
assert np.allclose(c.mean(), 2.0)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 6.0)
assert isinstance(c.sum(), (np.floating, float))
# Non-reduction ufunc preserves Column class
assert isinstance(np.cos(c), table.Column)
# Sum for a 1-d int column
c = table.Column(name="a", data=[1, 2, 3])
assert np.allclose(c.sum(), 6)
assert isinstance(c.sum(), (np.integer, int))
# Sum for a 2-d int column
c = table.Column(name="a", data=[[1, 2, 3], [4, 5, 6]])
assert c.sum() == 21
assert isinstance(c.sum(), (np.integer, int))
assert np.all(c.sum(axis=0) == [5, 7, 9])
assert c.sum(axis=0).shape == (3,)
assert isinstance(c.sum(axis=0), np.ndarray)
# Sum and mean for a 1-d masked column
c = table.MaskedColumn(name="a", data=[1.0, 2.0, 3.0], mask=[0, 0, 1])
assert np.allclose(c.mean(), 1.5)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 3.0)
assert isinstance(c.sum(), (np.floating, float))
def test_name_none(self, Column):
"""Can create a column without supplying name, which defaults to None"""
c = Column([1, 2])
assert c.name is None
assert np.all(c == np.array([1, 2]))
def test_quantity_init(self, Column):
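        # Initializing from a Quantity adopts its unit; also passing ``unit=``
        # converts the data to that unit (m -> cm below).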
c = Column(data=np.array([1, 2, 3]) * u.m)
assert np.all(c.data == np.array([1, 2, 3]))
assert np.all(c.unit == u.m)
c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm)
assert np.all(c.data == np.array([100, 200, 300]))
assert np.all(c.unit == u.cm)
def test_quantity_comparison(self, Column):
# regression test for gh-6532
c = Column([1, 2100, 3], unit="Hz")
q = 2 * u.kHz
check = c < q
assert np.all(check == [True, False, True])
# This already worked, but just in case.
check = q >= c
assert np.all(check == [True, False, True])
def test_attrs_survive_getitem_after_change(self, Column):
"""
Test for issue #3023: when calling getitem with a MaskedArray subclass
the original object attributes are not copied.
"""
c1 = Column(
[1, 2, 3], name="a", unit="m", format="%i", description="aa", meta={"a": 1}
)
c1.name = "b"
c1.unit = "km"
c1.format = "%d"
c1.description = "bb"
c1.meta = {"bbb": 2}
for item in (
slice(None, None),
slice(None, 1),
np.array([0, 2]),
np.array([False, True, False]),
):
c2 = c1[item]
assert c2.name == "b"
assert c2.unit is u.km
assert c2.format == "%d"
assert c2.description == "bb"
assert c2.meta == {"bbb": 2}
# Make sure that calling getitem resulting in a scalar does
# not copy attributes.
val = c1[1]
for attr in ("name", "unit", "format", "description", "meta"):
assert not hasattr(val, attr)
def test_to_quantity(self, Column):
d = Column([1, 2, 3], name="a", dtype="f8", unit="m")
assert np.all(d.quantity == ([1, 2, 3.0] * u.m))
assert np.all(d.quantity.value == ([1, 2, 3.0] * u.m).value)
assert np.all(d.quantity == d.to("m"))
assert np.all(d.quantity.value == d.to("m").value)
np.testing.assert_allclose(
d.to(u.km).value, ([0.001, 0.002, 0.003] * u.km).value
)
np.testing.assert_allclose(
d.to("km").value, ([0.001, 0.002, 0.003] * u.km).value
)
np.testing.assert_allclose(
d.to(u.MHz, u.equivalencies.spectral()).value,
[299.792458, 149.896229, 99.93081933],
)
d_nounit = Column([1, 2, 3], name="a", dtype="f8", unit=None)
with pytest.raises(u.UnitsError):
d_nounit.to(u.km)
assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3]))
# make sure the correct copy/no copy behavior is happening
q = [1, 3, 5] * u.km
# to should always make a copy
d.to(u.km)[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
# explicit copying of the quantity should not change the column
d.quantity.copy()[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
# but quantity directly is a "view", accessing the underlying column
d.quantity[:] = q
np.testing.assert_allclose(d, [1000, 3000, 5000])
# view should also work for integers
d2 = Column([1, 2, 3], name="a", dtype=int, unit="m")
d2.quantity[:] = q
np.testing.assert_allclose(d2, [1000, 3000, 5000])
        # but it should fail for strings or other non-numeric columns
d3 = Column(["arg", "name", "stuff"], name="a", unit="m")
with pytest.raises(TypeError):
d3.quantity
def test_to_funcunit_quantity(self, Column):
"""
Tests for #8424, check if function-unit can be retrieved from column.
"""
d = Column([1, 2, 3], name="a", dtype="f8", unit="dex(AA)")
assert np.all(d.quantity == ([1, 2, 3] * u.dex(u.AA)))
assert np.all(d.quantity.value == ([1, 2, 3] * u.dex(u.AA)).value)
assert np.all(d.quantity == d.to("dex(AA)"))
assert np.all(d.quantity.value == d.to("dex(AA)").value)
        # make sure casting to a linear unit works
q = [10, 100, 1000] * u.AA
np.testing.assert_allclose(d.to(u.AA), q)
def test_item_access_type(self, Column):
"""
Tests for #3095, which forces integer item access to always return a plain
ndarray or MaskedArray, even in the case of a multi-dim column.
"""
integer_types = (int, np.int_)
for int_type in integer_types:
c = Column([[1, 2], [3, 4]])
i0 = int_type(0)
i1 = int_type(1)
assert np.all(c[i0] == [1, 2])
assert type(c[i0]) == (
np.ma.MaskedArray if hasattr(Column, "mask") else np.ndarray
)
assert c[i0].shape == (2,)
c01 = c[i0:i1]
assert np.all(c01 == [[1, 2]])
assert isinstance(c01, Column)
assert c01.shape == (1, 2)
c = Column([1, 2])
assert np.all(c[i0] == 1)
assert isinstance(c[i0], np.integer)
assert c[i0].shape == ()
c01 = c[i0:i1]
assert np.all(c01 == [1])
assert isinstance(c01, Column)
assert c01.shape == (1,)
def test_insert_basic(self, Column):
c = Column(
[0, 1, 2],
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1 == [0, 100, 1, 2])
assert c1.attrs_equal(c)
assert type(c) is type(c1)
if hasattr(c1, "mask"):
assert c1.data.shape == c1.mask.shape
c1 = c.insert(-1, 100)
assert np.all(c1 == [0, 1, 100, 2])
c1 = c.insert(3, 100)
assert np.all(c1 == [0, 1, 2, 100])
c1 = c.insert(-3, 100)
assert np.all(c1 == [100, 0, 1, 2])
c1 = c.insert(1, [100, 200, 300])
if hasattr(c1, "mask"):
assert c1.data.shape == c1.mask.shape
# Out of bounds index
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(-4, 100)
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(4, 100)
def test_insert_axis(self, Column):
"""Insert with non-default axis kwarg"""
c = Column([[1, 2], [3, 4]])
c1 = c.insert(1, [5, 6], axis=None)
assert np.all(c1 == [1, 5, 6, 2, 3, 4])
c1 = c.insert(1, [5, 6], axis=1)
assert np.all(c1 == [[1, 5, 2], [3, 6, 4]])
def test_insert_string_expand(self, Column):
c = Column(["a", "b"])
c1 = c.insert(0, "abc")
assert np.all(c1 == ["abc", "a", "b"])
c = Column(["a", "b"])
c1 = c.insert(0, ["c", "def"])
assert np.all(c1 == ["c", "def", "a", "b"])
def test_insert_string_masked_values(self):
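        # Inserting ``np.ma.masked`` keeps the minimal string dtype (U1), while
        # inserting a wider masked string widens the dtype (U3), as asserted below.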
c = table.MaskedColumn(["a", "b"])
c1 = c.insert(0, np.ma.masked)
assert np.all(c1 == ["", "a", "b"])
assert np.all(c1.mask == [True, False, False])
assert c1.dtype == "U1"
c2 = c.insert(1, np.ma.MaskedArray(["ccc", "dd"], mask=[True, False]))
assert np.all(c2 == ["a", "ccc", "dd", "b"])
assert np.all(c2.mask == [False, True, False, False])
assert c2.dtype == "U3"
def test_insert_string_type_error(self, Column):
c = Column([1, 2])
with pytest.raises(ValueError, match="invalid literal for int"):
c.insert(0, "string")
c = Column(["a", "b"])
with pytest.raises(TypeError, match="string operation on non-string array"):
c.insert(0, 1)
def test_insert_multidim(self, Column):
c = Column([[1, 2], [3, 4]], name="a", dtype=int)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == [[1, 2], [100, 200], [3, 4]])
# Broadcast
c1 = c.insert(1, 100)
assert np.all(c1 == [[1, 2], [100, 100], [3, 4]])
# Wrong shape
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200, 300])
def test_insert_object(self, Column):
c = Column(["a", 1, None], name="a", dtype=object)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == np.array(["a", [100, 200], 1, None], dtype=object))
def test_insert_masked(self):
c = table.MaskedColumn(
[0, 1, 2], name="a", fill_value=9999, mask=[False, True, False]
)
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert c1.fill_value == 9999
assert np.all(c1.data.mask == [False, False, True, False])
assert type(c) is type(c1)
for mask in (False, True):
c1 = c.insert(1, 100, mask=mask)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert np.all(c1.data.mask == [False, mask, True, False])
def test_masked_multidim_as_list(self):
data = np.ma.MaskedArray([1, 2], mask=[True, False])
c = table.MaskedColumn([data])
assert c.shape == (1, 2)
assert np.all(c[0].mask == [True, False])
def test_insert_masked_multidim(self):
c = table.MaskedColumn([[1, 2], [3, 4]], name="a", dtype=int)
c1 = c.insert(1, [100, 200], mask=True)
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])
c1 = c.insert(1, [100, 200], mask=[True, False])
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200], mask=[True, False, True])
def test_mask_on_non_masked_table(self):
"""
        When the table is not masked, trying to set a mask on a column
        raises an AttributeError.
"""
t = table.Table([[1, 2], [3, 4]], names=("a", "b"), dtype=("i4", "f8"))
with pytest.raises(AttributeError):
t["a"].mask = [True, False]
class TestAttrEqual:
"""Bunch of tests originally from ATpy that test the attrs_equal method."""
def test_5(self, Column):
c1 = Column(name="a", dtype=int, unit="mJy")
c2 = Column(name="a", dtype=int, unit="mJy")
assert c1.attrs_equal(c2)
def test_6(self, Column):
c1 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
c2 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
assert c1.attrs_equal(c2)
def test_7(self, Column):
c1 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
c2 = Column(
name="b",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
assert not c1.attrs_equal(c2)
def test_8(self, Column):
c1 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
c2 = Column(
name="a",
dtype=float,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
assert not c1.attrs_equal(c2)
def test_9(self, Column):
c1 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
c2 = Column(
name="a",
dtype=int,
unit="erg.cm-2.s-1.Hz-1",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
assert not c1.attrs_equal(c2)
def test_10(self, Column):
c1 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
c2 = Column(
name="a",
dtype=int,
unit="mJy",
format="%g",
description="test column",
meta={"c": 8, "d": 12},
)
assert not c1.attrs_equal(c2)
def test_11(self, Column):
c1 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
c2 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="another test column",
meta={"c": 8, "d": 12},
)
assert not c1.attrs_equal(c2)
def test_12(self, Column):
c1 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
c2 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"e": 8, "d": 12},
)
assert not c1.attrs_equal(c2)
def test_13(self, Column):
c1 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
c2 = Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 9, "d": 12},
)
assert not c1.attrs_equal(c2)
def test_col_and_masked_col(self):
c1 = table.Column(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
c2 = table.MaskedColumn(
name="a",
dtype=int,
unit="mJy",
format="%i",
description="test column",
meta={"c": 8, "d": 12},
)
assert c1.attrs_equal(c2)
assert c2.attrs_equal(c1)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
class TestMetaColumn(MetaBaseTest):
test_class = table.Column
args = ()
class TestMetaMaskedColumn(MetaBaseTest):
test_class = table.MaskedColumn
args = ()
def test_getitem_metadata_regression():
"""
Regression test for #1471: MaskedArray does not call __array_finalize__ so
the meta-data was not getting copied over. By overloading _update_from we
are able to work around this bug.
"""
# Make sure that meta-data gets propagated with __getitem__
c = table.Column(
data=[1, 2], name="a", description="b", unit="m", format="%i", meta={"c": 8}
)
assert c[1:2].name == "a"
assert c[1:2].description == "b"
assert c[1:2].unit == "m"
assert c[1:2].format == "%i"
assert c[1:2].meta["c"] == 8
c = table.MaskedColumn(
data=[1, 2], name="a", description="b", unit="m", format="%i", meta={"c": 8}
)
assert c[1:2].name == "a"
assert c[1:2].description == "b"
assert c[1:2].unit == "m"
assert c[1:2].format == "%i"
assert c[1:2].meta["c"] == 8
# As above, but with take() - check the method and the function
c = table.Column(
data=[1, 2, 3], name="a", description="b", unit="m", format="%i", meta={"c": 8}
)
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == "a"
assert subset.description == "b"
assert subset.unit == "m"
assert subset.format == "%i"
assert subset.meta["c"] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.Column)
c = table.MaskedColumn(
data=[1, 2, 3], name="a", description="b", unit="m", format="%i", meta={"c": 8}
)
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == "a"
assert subset.description == "b"
assert subset.unit == "m"
assert subset.format == "%i"
assert subset.meta["c"] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.MaskedColumn)
def test_unicode_guidelines():
arr = np.array([1, 2, 3])
c = table.Column(arr, name="a")
assert_follows_unicode_guidelines(c)
def test_scalar_column():
"""
Column is not designed to hold scalars, but for numpy 1.6 this can happen:
>> type(np.std(table.Column([1, 2])))
astropy.table.column.Column
"""
c = table.Column(1.5)
assert repr(c) == "1.5"
assert str(c) == "1.5"
def test_qtable_column_conversion():
"""
Ensures that a QTable that gets assigned a unit switches to be Quantity-y
"""
qtab = table.QTable([[1, 2], [3, 4.2]], names=["i", "f"])
assert isinstance(qtab["i"], table.column.Column)
assert isinstance(qtab["f"], table.column.Column)
qtab["i"].unit = "km/s"
assert isinstance(qtab["i"], u.Quantity)
assert isinstance(qtab["f"], table.column.Column)
# should follow from the above, but good to make sure as a #4497 regression test
assert isinstance(qtab["i"][0], u.Quantity)
assert isinstance(qtab[0]["i"], u.Quantity)
assert not isinstance(qtab["f"][0], u.Quantity)
assert not isinstance(qtab[0]["f"], u.Quantity)
# Regression test for #5342: if a function unit is assigned, the column
# should become the appropriate FunctionQuantity subclass.
qtab["f"].unit = u.dex(u.cm / u.s**2)
assert isinstance(qtab["f"], u.Dex)
@pytest.mark.parametrize("masked", [True, False])
def test_string_truncation_warning(masked):
"""
Test warnings associated with in-place assignment to a string
column that results in truncation of the right hand side.
"""
from inspect import currentframe, getframeinfo
t = table.Table([["aa", "bb"]], names=["a"], masked=masked)
t["a"][1] = "cc"
t["a"][:] = "dd"
with pytest.warns(
table.StringTruncateWarning,
match=r"truncated right side " r"string\(s\) longer than 2 character\(s\)",
) as w:
frameinfo = getframeinfo(currentframe())
t["a"][0] = "eee" # replace item with string that gets truncated
assert t["a"][0] == "ee"
assert len(w) == 1
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert "test_column" in w[0].filename
with pytest.warns(
table.StringTruncateWarning,
match=r"truncated right side " r"string\(s\) longer than 2 character\(s\)",
) as w:
t["a"][:] = ["ff", "ggg"] # replace item with string that gets truncated
assert np.all(t["a"] == ["ff", "gg"])
assert len(w) == 1
# Test the obscure case of assigning from an array that was originally
# wider than any of the current elements (i.e. dtype is U4 but actual
# elements are U1 at the time of assignment).
val = np.array(["ffff", "gggg"])
val[:] = ["f", "g"]
t["a"][:] = val
assert np.all(t["a"] == ["f", "g"])
def test_string_truncation_warning_masked():
"""
    Test warnings associated with in-place assignment of a string
    to a masked column, specifically where the right hand side
contains np.ma.masked.
"""
# Test for strings, but also cover assignment of np.ma.masked to
# int and float masked column setting. This was previously only
# covered in an unrelated io.ascii test (test_line_endings) which
# showed an unexpected difference between handling of str and numeric
# masked arrays.
for values in (["a", "b"], [1, 2], [1.0, 2.0]):
mc = table.MaskedColumn(values)
mc[1] = np.ma.masked
assert np.all(mc.mask == [False, True])
mc[:] = np.ma.masked
assert np.all(mc.mask == [True, True])
mc = table.MaskedColumn(["aa", "bb"])
with pytest.warns(
table.StringTruncateWarning,
match=r"truncated right side " r"string\(s\) longer than 2 character\(s\)",
) as w:
mc[:] = [np.ma.masked, "ggg"] # replace item with string that gets truncated
assert mc[1] == "gg"
assert np.all(mc.mask == [True, False])
assert len(w) == 1
@pytest.mark.parametrize("Column", (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_create_from_str(Column):
"""
Create a bytestring Column from strings (including unicode) in Py3.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = "bä"
c = Column([uba, "def"], dtype="S")
assert c.dtype.char == "S"
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, "def"]))
@pytest.mark.parametrize("Column", (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes_obj(Column):
"""
    Create a Column of dtype object with a bytestring in it and make sure
    it keeps the bytestring and does not convert to str when accessed.
"""
c = Column([None, b"def"])
assert c.dtype.char == "O"
assert not c[0]
assert c[1] == b"def"
assert isinstance(c[1], bytes)
assert not isinstance(c[1], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([None, b"def"]))
assert not np.all(c[:2] == np.array([None, "def"]))
@pytest.mark.parametrize("Column", (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes(Column):
"""
Create a bytestring Column from bytes and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = "bä"
uba8 = uba.encode("utf-8")
c = Column([uba8, b"def"])
assert c.dtype.char == "S"
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, "def"]))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == "S"
# Array / list comparisons
assert np.all(c == [uba, "def"])
ok = c == [uba8, b"def"]
assert type(ok) is type(c.data) # noqa: E721
assert ok.dtype.char == "?"
assert np.all(ok)
assert np.all(c == np.array([uba, "def"]))
assert np.all(c == np.array([uba8, b"def"]))
# Scalar compare
cmps = (uba, uba8)
for cmp in cmps:
ok = c == cmp
assert type(ok) is type(c.data) # noqa: E721
assert np.all(ok == [True, False])
def test_col_unicode_sandwich_unicode():
"""
Sanity check that Unicode Column behaves normally.
"""
uba = "bä"
uba8 = uba.encode("utf-8")
c = table.Column([uba, "def"], dtype="U")
assert c[0] == uba
assert isinstance(c[:0], table.Column)
assert isinstance(c[0], str)
assert np.all(c[:2] == np.array([uba, "def"]))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == "U"
ok = c == [uba, "def"]
assert type(ok) == np.ndarray
assert ok.dtype.char == "?"
assert np.all(ok)
with warnings.catch_warnings():
# Ignore the FutureWarning in numpy >=1.24 (it is OK).
warnings.filterwarnings("ignore", message=".*elementwise comparison failed.*")
assert np.all(c != [uba8, b"def"])
def test_masked_col_unicode_sandwich():
"""
Create a bytestring MaskedColumn and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
c = table.MaskedColumn([b"abc", b"def"])
c[1] = np.ma.masked
assert isinstance(c[:0], table.MaskedColumn)
assert isinstance(c[0], str)
assert c[0] == "abc"
assert c[1] is np.ma.masked
assert isinstance(c[:], table.MaskedColumn)
assert c[:].dtype.char == "S"
ok = c == ["abc", "def"]
assert ok[0]
assert ok[1] is np.ma.masked
assert np.all(c == [b"abc", b"def"])
assert np.all(c == np.array(["abc", "def"]))
assert np.all(c == np.array([b"abc", b"def"]))
for cmp in ("abc", b"abc"):
ok = c == cmp
assert type(ok) is np.ma.MaskedArray
assert ok[0]
assert ok[1] is np.ma.masked
@pytest.mark.parametrize("Column", (table.Column, table.MaskedColumn))
def test_unicode_sandwich_set(Column):
"""
    Test item and slice assignment of str and bytes values on a bytestring Column.
"""
uba = "bä"
c = Column([b"abc", b"def"])
c[0] = b"aa"
assert np.all(c == ["aa", "def"])
    # a-umlaut is a 2-byte character in utf-8; the test fails with ascii encoding
    c[0] = uba
assert np.all(c == [uba, "def"])
assert c.pformat() == ["None", "----", " " + uba, " def"]
c[:] = b"cc"
assert np.all(c == ["cc", "cc"])
c[:] = uba
assert np.all(c == [uba, uba])
c[:] = ""
c[:] = [uba, b"def"]
assert np.all(c == [uba, b"def"])
@pytest.mark.parametrize("class1", [table.MaskedColumn, table.Column])
@pytest.mark.parametrize("class2", [table.MaskedColumn, table.Column, str, list])
def test_unicode_sandwich_compare(class1, class2):
"""Test that comparing a bytestring Column/MaskedColumn with various
str (unicode) object types gives the expected result. Tests #6838.
"""
obj1 = class1([b"a", b"c"])
if class2 is str:
obj2 = "a"
elif class2 is list:
obj2 = ["a", "b"]
else:
obj2 = class2(["a", "b"])
assert np.all((obj1 == obj2) == [True, False])
assert np.all((obj2 == obj1) == [True, False])
assert np.all((obj1 != obj2) == [False, True])
assert np.all((obj2 != obj1) == [False, True])
assert np.all((obj1 > obj2) == [False, True])
assert np.all((obj2 > obj1) == [False, False])
assert np.all((obj1 <= obj2) == [True, False])
assert np.all((obj2 <= obj1) == [True, True])
assert np.all((obj1 < obj2) == [False, False])
assert np.all((obj2 < obj1) == [False, True])
assert np.all((obj1 >= obj2) == [True, True])
assert np.all((obj2 >= obj1) == [True, False])
def test_unicode_sandwich_masked_compare():
"""Test the fix for #6839 from #6899."""
c1 = table.MaskedColumn(["a", "b", "c", "d"], mask=[True, False, True, False])
c2 = table.MaskedColumn([b"a", b"b", b"c", b"d"], mask=[True, True, False, False])
for cmp in ((c1 == c2), (c2 == c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert cmp[3]
for cmp in ((c1 != c2), (c2 != c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert not cmp[3]
# Note: comparisons <, >, >=, <= fail to return a masked array entirely,
# see https://github.com/numpy/numpy/issues/10092.
def test_structured_masked_column_roundtrip():
mc = table.MaskedColumn(
[(1.0, 2.0), (3.0, 4.0)], mask=[(False, False), (False, False)], dtype="f8,f8"
)
assert len(mc.dtype.fields) == 2
mc2 = table.MaskedColumn(mc)
assert_array_equal(mc2, mc)
@pytest.mark.parametrize("dtype", ["i4,f4", "f4,(2,)f8"])
def test_structured_empty_column_init(dtype):
dtype = np.dtype(dtype)
c = table.Column(length=5, shape=(2,), dtype=dtype)
assert c.shape == (5, 2)
assert c.dtype == dtype
def test_column_value_access():
"""Can a column's underlying data consistently be accessed via `.value`,
whether it is a `Column`, `MaskedColumn`, `Quantity`, or `Time`?"""
data = np.array([1, 2, 3])
tbl = table.QTable(
{
"a": table.Column(data),
"b": table.MaskedColumn(data),
"c": u.Quantity(data),
"d": time.Time(data, format="mjd"),
}
)
assert type(tbl["a"].value) == np.ndarray
assert type(tbl["b"].value) == np.ma.MaskedArray
assert type(tbl["c"].value) == np.ndarray
assert type(tbl["d"].value) == np.ndarray
def test_masked_column_serialize_method_propagation():
mc = table.MaskedColumn([1.0, 2.0, 3.0], mask=[True, False, True])
assert mc.info.serialize_method["ecsv"] == "null_value"
mc.info.serialize_method["ecsv"] = "data_mask"
assert mc.info.serialize_method["ecsv"] == "data_mask"
mc2 = mc.copy()
assert mc2.info.serialize_method["ecsv"] == "data_mask"
mc3 = table.MaskedColumn(mc)
assert mc3.info.serialize_method["ecsv"] == "data_mask"
mc4 = mc.view(table.MaskedColumn)
assert mc4.info.serialize_method["ecsv"] == "data_mask"
mc5 = mc[1:]
assert mc5.info.serialize_method["ecsv"] == "data_mask"
@pytest.mark.parametrize("dtype", ["S", "U", "i"])
def test_searchsorted(Column, dtype):
c = Column([1, 2, 2, 3], dtype=dtype)
    if Column is table.MaskedColumn:
# Searchsorted seems to ignore the mask
c[2] = np.ma.masked
if dtype == "i":
vs = (2, [2, 1])
else:
vs = ("2", ["2", "1"], b"2", [b"2", b"1"])
for v in vs:
v = np.array(v, dtype=dtype)
exp = np.searchsorted(c.data, v, side="right")
res = c.searchsorted(v, side="right")
assert np.all(res == exp)
res = np.searchsorted(c, v, side="right")
assert np.all(res == exp)
|
67b505bde189cec46106f8f0ed65e3c9931457adac046cde2666cd320747e9bd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import base64
import contextlib
import errno
import hashlib
import io
import itertools
import os
import pathlib
import platform
import random
import shutil
import stat
import sys
import tempfile
import urllib.error
import urllib.parse
import urllib.request
import warnings
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
from tempfile import NamedTemporaryFile, TemporaryDirectory
import py.path
import pytest
import astropy.utils.data
from astropy import units as _u # u is taken
from astropy.config import paths
from astropy.utils.data import (
CacheDamaged,
CacheMissingWarning,
_deltemps,
_get_download_cache_loc,
_tempfilestodel,
cache_contents,
cache_total_size,
check_download_cache,
check_free_space_in_dir,
clear_download_cache,
compute_hash,
conf,
download_file,
download_files_in_parallel,
export_download_cache,
get_cached_urls,
get_file_contents,
get_free_space_in_dir,
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_fileobj,
get_pkg_data_path,
get_readable_fileobj,
import_download_cache,
import_file_to_cache,
is_url,
is_url_in_cache,
)
from astropy.utils.exceptions import AstropyWarning
CI = os.environ.get("CI", False) == "true"
TESTURL = "http://www.astropy.org"
TESTURL2 = "http://www.astropy.org/about.html"
TESTURL_SSL = "https://www.astropy.org"
TESTLOCAL = get_pkg_data_filename(os.path.join("data", "local.dat"))
# NOTE: Python can be built without bz2 or lzma.
from astropy.utils.compat.optional_deps import HAS_BZ2, HAS_LZMA
# For when we need "some" test URLs
FEW = 5
# For stress testing the locking system using multiprocessing
N_PARALLEL_HAMMER = 5 # as high as 500 to replicate a bug
# For stress testing the locking system using threads
# (cheaper, works with coverage)
N_THREAD_HAMMER = 10 # as high as 1000 to replicate a bug
def can_rename_directory_in_use():
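    # Probe whether this platform allows renaming a directory while a file
    # inside it is held open (typically not the case on Windows).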
with TemporaryDirectory() as d:
d1 = os.path.join(d, "a")
d2 = os.path.join(d, "b")
f1 = os.path.join(d1, "file")
os.mkdir(d1)
with open(f1, "w") as f:
f.write("some contents\n")
try:
with open(f1):
os.rename(d1, d2)
except PermissionError:
return False
else:
return True
CAN_RENAME_DIRECTORY_IN_USE = can_rename_directory_in_use()
def url_to(path):
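    # e.g. "/tmp/x" -> "file:///tmp/x" on POSIX; these file:// URLs stand in
    # for remote URLs throughout this module.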
return pathlib.Path(path).resolve().as_uri()
@pytest.fixture
def valid_urls(tmp_path):
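    # Yields an endless stream of (url, contents) pairs, each backed by a fresh
    # local file; tests typically unpack them as ``u, c = next(valid_urls)``.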
def _valid_urls(tmp_path):
for i in itertools.count():
c = os.urandom(16).hex()
fn = tmp_path / f"valid_{str(i)}"
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
yield u, c
return _valid_urls(tmp_path)
@pytest.fixture
def invalid_urls(tmp_path):
def _invalid_urls(tmp_path):
for i in itertools.count():
fn = tmp_path / f"invalid_{str(i)}"
if not os.path.exists(fn):
yield url_to(fn)
return _invalid_urls(tmp_path)
@pytest.fixture
def temp_cache(tmp_path):
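    # Point the download cache at a per-test temporary directory, then verify
    # the cache is still internally consistent when the test finishes.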
with paths.set_temp_cache(tmp_path):
yield None
check_download_cache()
def change_tree_permission(d, writable=False):
if writable:
dirperm = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR | stat.S_IWUSR
else:
dirperm = stat.S_IRUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR
for dirpath, dirnames, filenames in os.walk(d):
os.chmod(dirpath, dirperm)
for f in filenames:
os.chmod(os.path.join(dirpath, f), fileperm)
def is_dir_readonly(d):
try:
with NamedTemporaryFile(dir=d):
return False
except PermissionError:
return True
@contextlib.contextmanager
def readonly_dir(d):
try:
change_tree_permission(d, writable=False)
yield
finally:
change_tree_permission(d, writable=True)
@pytest.fixture
def readonly_cache(tmp_path, valid_urls):
with TemporaryDirectory(dir=tmp_path) as d:
# other fixtures use the same tmp_path so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
with readonly_dir(d):
if not is_dir_readonly(d):
pytest.skip("Unable to make directory readonly")
yield urls
assert set(d.iterdir()) == files
check_download_cache()
@pytest.fixture
def fake_readonly_cache(tmp_path, valid_urls, monkeypatch):
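    # Simulate a read-only cache without changing filesystem permissions: first
    # populate a temporary cache, then monkeypatch out every directory-creation
    # routine the cache machinery could use to write new entries.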
def no_mkdir(path, mode=None):
raise OSError(errno.EPERM, "os.mkdir monkeypatched out")
def no_mkdtemp(*args, **kwargs):
"""On Windows, mkdtemp uses mkdir in a loop and therefore hangs
with it monkeypatched out.
"""
raise OSError(errno.EPERM, "os.mkdtemp monkeypatched out")
def no_TemporaryDirectory(*args, **kwargs):
raise OSError(errno.EPERM, "_SafeTemporaryDirectory monkeypatched out")
with TemporaryDirectory(dir=tmp_path) as d:
# other fixtures use the same tmp_path so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
monkeypatch.setattr(os, "mkdir", no_mkdir)
monkeypatch.setattr(tempfile, "mkdtemp", no_mkdtemp)
monkeypatch.setattr(
astropy.utils.data, "_SafeTemporaryDirectory", no_TemporaryDirectory
)
yield urls
assert set(d.iterdir()) == files
check_download_cache()
def test_download_file_basic(valid_urls, temp_cache):
u, c = next(valid_urls)
assert get_file_contents(download_file(u, cache=False)) == c
assert not is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache miss
assert is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache hit
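    # With ``sources=[]`` no download is attempted, so this succeeds only
    # because the URL is already in the cache.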
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_download_file_absolute_path(valid_urls, temp_cache):
def is_abs(p):
return p == os.path.abspath(p)
u, c = next(valid_urls)
assert is_abs(download_file(u, cache=False)) # no cache
assert is_abs(download_file(u, cache=True)) # not in cache
assert is_abs(download_file(u, cache=True)) # in cache
for k, v in cache_contents().items():
assert is_abs(v)
def test_unicode_url(valid_urls, temp_cache):
u, c = next(valid_urls)
unicode_url = "http://é—☃—è.com"
download_file(unicode_url, cache=False, sources=[u])
download_file(unicode_url, cache=True, sources=[u])
download_file(unicode_url, cache=True, sources=[])
assert is_url_in_cache(unicode_url)
assert unicode_url in cache_contents()
def test_too_long_url(valid_urls, temp_cache):
u, c = next(valid_urls)
long_url = "http://" + "a" * 256 + ".com"
download_file(long_url, cache=False, sources=[u])
download_file(long_url, cache=True, sources=[u])
download_file(long_url, cache=True, sources=[])
def test_case_collision(valid_urls, temp_cache):
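    # URLs whose paths differ only by case must map to distinct cache entries.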
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f1 = download_file("http://example.com/thing", cache=True, sources=[u])
f2 = download_file("http://example.com/THING", cache=True, sources=[u2])
assert f1 != f2
assert get_file_contents(f1) != get_file_contents(f2)
def test_domain_name_case(valid_urls, temp_cache):
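    # Unlike the path, the host part of a URL is case-insensitive, so all of
    # these spellings should share a single cache entry.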
u, c = next(valid_urls)
download_file("http://Example.com/thing", cache=True, sources=[u])
assert is_url_in_cache("http://EXAMPLE.com/thing")
download_file("http://EXAMPLE.com/thing", cache=True, sources=[])
assert is_url_in_cache("Http://example.com/thing")
download_file("Http://example.com/thing", cache=True, sources=[])
@pytest.mark.remote_data(source="astropy")
def test_download_nocache_from_internet():
fnout = download_file(TESTURL, cache=False)
assert os.path.isfile(fnout)
@pytest.fixture
def a_binary_file(tmp_path):
fn = tmp_path / "file"
b_contents = b"\xde\xad\xbe\xef"
with open(fn, "wb") as f:
f.write(b_contents)
yield fn, b_contents
@pytest.fixture
def a_file(tmp_path):
fn = tmp_path / "file.txt"
contents = "contents\n"
with open(fn, "w") as f:
f.write(contents)
yield fn, contents
def test_temp_cache(tmp_path):
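    # ``paths.set_temp_cache`` should nest and restore the previous cache
    # location on exit, including when the context is left via an exception.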
dldir0 = _get_download_cache_loc()
check_download_cache()
with paths.set_temp_cache(tmp_path):
dldir1 = _get_download_cache_loc()
check_download_cache()
assert dldir1 != dldir0
dldir2 = _get_download_cache_loc()
check_download_cache()
assert dldir2 != dldir1
assert dldir2 == dldir0
# Check that things are okay even if we exit via an exception
class Special(Exception):
pass
try:
with paths.set_temp_cache(tmp_path):
dldir3 = _get_download_cache_loc()
check_download_cache()
assert dldir3 == dldir1
raise Special
except Special:
pass
dldir4 = _get_download_cache_loc()
check_download_cache()
assert dldir4 != dldir3
assert dldir4 == dldir0
@pytest.mark.parametrize("parallel", [False, True])
def test_download_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache, parallel
):
# This is a combined test because the parallel version triggered a nasty
# bug and I was trying to track it down by comparing with the non-parallel
# version. I think the bug was that the parallel downloader didn't respect
# temporary cache settings.
# Make a big list of test URLs
u, c = next(valid_urls)
# as tuples (URL, right_content, wrong_content)
urls = [(u, c, None)]
# where to download the contents
sources = {}
# Set up some URLs to download where the "true" URL is not in the sources
# list; make the true URL valid with different contents so we can tell if
# it was loaded by mistake.
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
# For many of them the sources list starts with invalid URLs
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
# Now fetch them all
if parallel:
rs = download_files_in_parallel(
[u for (u, c, c_bad) in urls], cache=True, sources=sources
)
else:
rs = [
download_file(u, cache=True, sources=sources.get(u, None))
for (u, c, c_bad) in urls
]
assert len(rs) == len(urls)
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
assert is_url_in_cache(u)
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_download_file_threaded_many(temp_cache, valid_urls):
"""Hammer download_file with multiple threaded requests.
The goal is to stress-test the locking system. Normal parallel downloading
also does this but coverage tools lose track of which paths are explored.
"""
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(lambda u: download_file(u, cache=True), [u for (u, c) in urls]))
check_download_cache()
assert len(r) == len(urls)
for r, (u, c) in zip(r, urls):
assert get_file_contents(r) == c
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_threaded_segfault(valid_urls):
"""Demonstrate urllib's segfault."""
def slurp_url(u):
with urllib.request.urlopen(u) as remote:
block = True
while block:
block = remote.read(1024)
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
list(P.map(lambda u: slurp_url(u), [u for (u, c) in urls]))
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_download_file_threaded_many_partial_success(
temp_cache, valid_urls, invalid_urls
):
"""Hammer download_file with multiple threaded requests.
Because some of these requests fail, the locking context manager is
exercised with exceptions as well as success returns. I do not expect many
surprises from the threaded version, but the process version gave trouble
here.
"""
urls = []
contents = {}
for (u, c), i in islice(zip(valid_urls, invalid_urls), N_THREAD_HAMMER):
urls.append(u)
contents[u] = c
urls.append(i)
def get(u):
try:
return download_file(u, cache=True)
except OSError:
return None
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(get, urls))
check_download_cache()
assert len(r) == len(urls)
for r, u in zip(r, urls):
if u in contents:
assert get_file_contents(r) == contents[u]
else:
assert r is None
def test_clear_download_cache(valid_urls):
u1, c1 = next(valid_urls)
download_file(u1, cache=True)
u2, c2 = next(valid_urls)
download_file(u2, cache=True)
assert is_url_in_cache(u2)
clear_download_cache(u2)
assert not is_url_in_cache(u2)
assert is_url_in_cache(u1)
u3, c3 = next(valid_urls)
f3 = download_file(u3, cache=True)
assert is_url_in_cache(u3)
clear_download_cache(f3)
assert not is_url_in_cache(u3)
assert is_url_in_cache(u1)
u4, c4 = next(valid_urls)
f4 = download_file(u4, cache=True)
assert is_url_in_cache(u4)
clear_download_cache(compute_hash(f4))
assert not is_url_in_cache(u4)
assert is_url_in_cache(u1)
def test_clear_download_multiple_references_doesnt_corrupt_storage(
temp_cache, tmp_path
):
"""Check that files with the same hash don't confuse the storage."""
content = "Test data; doesn't matter much.\n"
def make_url():
with NamedTemporaryFile("w", dir=tmp_path, delete=False) as f:
f.write(content)
url = url_to(f.name)
clear_download_cache(url)
filename = download_file(url, cache=True)
return url, filename
a_url, a_filename = make_url()
clear_download_cache(a_filename)
assert not is_url_in_cache(a_url)
f_url, f_filename = make_url()
g_url, g_filename = make_url()
assert f_url != g_url
assert is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
clear_download_cache(f_url)
assert not is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
assert os.path.exists(
g_filename
), "Contents should not be deleted while a reference exists"
clear_download_cache(g_url)
assert not os.path.exists(
g_filename
), "No reference exists any more, file should be deleted"
@pytest.mark.parametrize("use_cache", [False, True])
def test_download_file_local_cache_survives(tmp_path, temp_cache, use_cache):
"""Confirm that downloading a local file does not delete it.
When implemented with urlretrieve (rather than urlopen) local files are
not copied to create temporaries, so importing them to the cache deleted
the original from wherever it was in the filesystem. I lost some built-in
astropy data.
"""
fn = tmp_path / "file"
contents = "some text"
with open(fn, "w") as f:
f.write(contents)
u = url_to(fn)
f = download_file(u, cache=use_cache)
assert fn not in _tempfilestodel, "File should not be deleted!"
assert os.path.isfile(fn), "File should not be deleted!"
assert get_file_contents(f) == contents
def test_sources_normal(temp_cache, valid_urls, invalid_urls):
primary, contents = next(valid_urls)
fallback1 = next(invalid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_fallback(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_ignore_primary(temp_cache, valid_urls, invalid_urls):
primary, bogus = next(valid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_multiple(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_sources_multiple_missing(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2 = next(invalid_urls)
with pytest.raises(urllib.error.URLError):
download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert not is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_update_url(tmp_path, temp_cache):
with TemporaryDirectory(dir=tmp_path) as d:
f_name = os.path.join(d, "f")
with open(f_name, "w") as f:
f.write("old")
f_url = url_to(f.name)
assert get_file_contents(download_file(f_url, cache=True)) == "old"
with open(f_name, "w") as f:
f.write("new")
assert get_file_contents(download_file(f_url, cache=True)) == "old"
assert get_file_contents(download_file(f_url, cache="update")) == "new"
# Now the URL doesn't exist any more.
assert not os.path.exists(f_name)
with pytest.raises(urllib.error.URLError):
# Direct download should fail
download_file(f_url, cache=False)
assert (
get_file_contents(download_file(f_url, cache=True)) == "new"
), "Cached version should still exist"
with pytest.raises(urllib.error.URLError):
# cannot download new version to check for updates
download_file(f_url, cache="update")
assert (
get_file_contents(download_file(f_url, cache=True)) == "new"
), "Failed update should not remove the current version"
@pytest.mark.remote_data(source="astropy")
def test_download_noprogress():
fnout = download_file(TESTURL, cache=False, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_cache():
download_dir = _get_download_cache_loc()
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache("http://this_was_never_downloaded_before.com")
# Make sure lockdir was released
lockdir = os.path.join(download_dir, "lock")
assert not os.path.isdir(lockdir), "Cache dir lock was not released!"
@pytest.mark.remote_data(source="astropy")
def test_download_certificate_verification_failed():
"""Tests for https://github.com/astropy/astropy/pull/10434"""
# First test the expected exception when download fails due to a
# certificate verification error; we simulate this by passing a bogus
# CA directory to the ssl_context argument
ssl_context = {"cafile": None, "capath": "/does/not/exist"}
msg = f"Verification of TLS/SSL certificate at {TESTURL_SSL} failed"
with pytest.raises(urllib.error.URLError, match=msg):
download_file(TESTURL_SSL, cache=False, ssl_context=ssl_context)
with pytest.warns(AstropyWarning, match=msg) as warning_lines:
fnout = download_file(
TESTURL_SSL, cache=False, ssl_context=ssl_context, allow_insecure=True
)
assert len(warning_lines) == 1
assert os.path.isfile(fnout)
def test_download_cache_after_clear(tmp_path, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
download_dir = _get_download_cache_loc()
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_parallel_from_internet_works(temp_cache):
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = "intersphinx/README"
urls = []
sources = {}
for s in ["", fileloc]:
urls.append(main_url + s)
sources[urls[-1]] = [urls[-1], mirror_url + s]
fnout = download_files_in_parallel(urls, sources=sources)
assert all([os.path.isfile(f) for f in fnout]), fnout
@pytest.mark.parametrize("method", [None, "spawn"])
def test_download_parallel_fills_cache(tmp_path, valid_urls, method):
urls = []
# tmp_path is shared between many tests, and that can cause weird
# interactions if we set the temporary cache too directly
with paths.set_temp_cache(tmp_path):
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel(
[u for (u, c) in urls], multiprocessing_start_method=method
)
assert len(rs) == len(urls)
url_set = {u for (u, c) in urls}
assert url_set <= set(get_cached_urls())
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
check_download_cache()
assert not url_set.intersection(get_cached_urls())
check_download_cache()
def test_download_parallel_with_empty_sources(valid_urls, temp_cache):
urls = []
sources = {}
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel([u for (u, c) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c) in urls)
# assert u <= set(get_cached_urls())
check_download_cache()
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
def test_download_parallel_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache
):
u, c = next(valid_urls)
urls = [(u, c, None)]
sources = {}
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
rs = download_files_in_parallel([u for (u, c, c_bad) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c, c_bad) in urls)
# assert u <= set(get_cached_urls())
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
def test_download_parallel_many(temp_cache, valid_urls):
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
r = download_files_in_parallel([u for (u, c) in td])
assert len(r) == len(td)
for r, (u, c) in zip(r, td):
assert get_file_contents(r) == c
def test_download_parallel_partial_success(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful download works.
Even in the presence of many requested URLs, presumably hitting all the
parallelism this system can manage, a download failure leads to a tidy
shutdown.
"""
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
u_bad = next(invalid_urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel([u_bad] + [u for (u, c) in td])
# Actually some files may get downloaded, others not.
# Is this good? Should we stubbornly keep trying?
# assert not any([is_url_in_cache(u) for (u, c) in td])
@pytest.mark.slow
def test_download_parallel_partial_success_lock_safe(
temp_cache, valid_urls, invalid_urls
):
"""Check that a partially successful parallel download leaves the cache unlocked.
This needs to be repeated many times because race conditions are what cause
this sort of thing, especially situations where a process might be forcibly
shut down while it holds the lock.
"""
s = random.getstate()
try:
random.seed(0)
for _ in range(N_PARALLEL_HAMMER):
td = list(islice(valid_urls, FEW))
u_bad = next(invalid_urls)
urls = [u_bad] + [u for (u, c) in td]
random.shuffle(urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel(urls)
finally:
random.setstate(s)
def test_download_parallel_update(temp_cache, tmp_path):
td = []
for i in range(N_PARALLEL_HAMMER):
c = f"{i:04d}"
fn = tmp_path / c
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
clear_download_cache(u)
td.append((fn, u, c))
r1 = download_files_in_parallel([u for (fn, u, c) in td])
assert len(r1) == len(td)
for r_1, (fn, u, c) in zip(r1, td):
assert get_file_contents(r_1) == c
td2 = []
for fn, u, c in td:
c_plus = f"{c} updated"
fn = tmp_path / c
with open(fn, "w") as f:
f.write(c_plus)
td2.append((fn, u, c, c_plus))
r2 = download_files_in_parallel([u for (fn, u, c) in td], cache=True)
assert len(r2) == len(td)
for r_2, (fn, u, c, c_plus) in zip(r2, td2):
assert get_file_contents(r_2) == c
assert c != c_plus
r3 = download_files_in_parallel([u for (fn, u, c) in td], cache="update")
assert len(r3) == len(td)
for r_3, (fn, u, c, c_plus) in zip(r3, td2):
assert get_file_contents(r_3) != c
assert get_file_contents(r_3) == c_plus
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_update_parallel(temp_cache, valid_urls):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(i):
return download_file(u, cache="update", sources=[u2])
with ThreadPoolExecutor(max_workers=N_THREAD_HAMMER) as P:
r = set(P.map(update, range(N_THREAD_HAMMER)))
check_download_cache()
for f in r:
assert get_file_contents(f) == c2
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_update_parallel_multi(temp_cache, valid_urls):
u, c = next(valid_urls)
iucs = list(islice(valid_urls, N_THREAD_HAMMER))
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(uc):
u2, c2 = uc
return download_file(u, cache="update", sources=[u2]), c2
with ThreadPoolExecutor(max_workers=len(iucs)) as P:
r = list(P.map(update, iucs))
check_download_cache()
assert any(get_file_contents(f) == c for (f, c) in r)
@pytest.mark.remote_data(source="astropy")
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding="utf-8") as page:
assert page.read().find("Astropy") > -1
def test_find_by_hash(valid_urls, temp_cache):
testurl, contents = next(valid_urls)
p = download_file(testurl, cache=True)
hash = compute_hash(p)
hashstr = "hash/" + hash
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(fnout)
assert not os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename(
"kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli"
)
@pytest.mark.parametrize("package", [None, "astropy", "numpy"])
def test_get_invalid(package):
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package=package)
assert not os.path.isfile(path)
assert not os.path.isdir(path)
# Package data functions
@pytest.mark.parametrize(
"filename", ["local.dat", "local.dat.gz", "local.dat.bz2", "local.dat.xz"]
)
def test_local_data_obj(filename):
if (not HAS_BZ2 and "bz2" in filename) or (not HAS_LZMA and "xz" in filename):
with pytest.raises(ValueError, match=r" format files are not supported"):
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
else:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.fixture(params=["invalid.dat.bz2", "invalid.dat.gz"])
def bad_compressed(request, tmp_path):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b"BZhinvalid"
gz_content = b"\x1f\x8b\x08invalid"
datafile = tmp_path / request.param
filename = str(datafile)
if filename.endswith(".bz2"):
contents = bz_content
elif filename.endswith(".gz"):
contents = gz_content
else:
contents = "invalid"
datafile.write_bytes(contents)
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith(".bz2")
is_xz = bad_compressed.endswith(".xz")
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_LZMA and is_xz):
with pytest.raises(
ModuleNotFoundError, match=r"does not provide the [lb]z[2m]a? module\."
):
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
f.read()
else:
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
assert f.read().rstrip().endswith(b"invalid")
def test_local_data_name():
assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith("local.dat")
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), "data")
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert os.path.normcase(filename) == (
os.path.normcase(os.path.join(data_dir, "test_package", "data", "foo.txt"))
)
finally:
sys.path.pop(0)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
with pytest.raises(RuntimeError):
get_pkg_data_filename("../../../data/README.rst")
def test_compute_hash(tmp_path):
rands = b"1234567890abcdefghijklmnopqrstuvwxyz"
filename = tmp_path / "tmp.dat"
with open(filename, "wb") as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
def test_get_pkg_data_contents():
with get_pkg_data_fileobj("data/local.dat") as f:
contents1 = f.read()
contents2 = get_pkg_data_contents("data/local.dat")
assert contents1 == contents2
@pytest.mark.remote_data(source="astropy")
def test_data_noastropy_fallback(monkeypatch):
"""
Tests to make sure the default behavior when the cache directory can't
be located is correct
"""
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv("XDG_CONFIG_HOME", "foo")
monkeypatch.delenv("XDG_CONFIG_HOME")
monkeypatch.setenv("XDG_CACHE_HOME", "bar")
monkeypatch.delenv("XDG_CACHE_HOME")
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None)
    # make sure the _find_or_create_root_dir function fails as though the
    # astropy dir could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError()
monkeypatch.setattr(paths, "_find_or_create_root_dir", osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir(rootname="astropy")
with pytest.warns(CacheMissingWarning) as warning_lines:
fnout = download_file(TESTURL, cache=True)
n_warns = len(warning_lines)
partial_warn_msgs = ["remote data cache could not be accessed", "temporary file"]
if n_warns == 4:
partial_warn_msgs.extend(["socket", "socket"])
for wl in warning_lines:
cur_w = str(wl).lower()
for i, partial_msg in enumerate(partial_warn_msgs):
if partial_msg in cur_w:
del partial_warn_msgs[i]
break
assert (
len(partial_warn_msgs) == 0
), f"Got some unexpected warnings: {partial_warn_msgs}"
assert n_warns in (2, 4), f"Expected 2 or 4 warnings, got {n_warns}"
assert os.path.isfile(fnout)
    # clearing the cache should be a no-op that doesn't affect fnout
with pytest.warns(
CacheMissingWarning, match=r".*Not clearing data cache - cache inaccessible.*"
):
clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
    # now remove it so tests don't clutter up the temp dir; this should get
    # called at exit anyway, but we do it here just to make sure it's working
    # correctly
_deltemps()
assert not os.path.isfile(fnout)
# now try with no cache
fnnocache = download_file(TESTURL, cache=False)
with open(fnnocache, "rb") as page:
assert page.read().decode("utf-8").find("Astropy") > -1
    # no warnings should be raised in fileobj because the cache is unnecessary
@pytest.mark.parametrize(
"filename",
[
"unicode.txt",
"unicode.txt.gz",
pytest.param(
"unicode.txt.bz2",
marks=pytest.mark.xfail(not HAS_BZ2, reason="no bz2 support"),
),
pytest.param(
"unicode.txt.xz",
marks=pytest.mark.xfail(not HAS_LZMA, reason="no lzma support"),
),
],
)
def test_read_unicode(filename):
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="utf-8")
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="binary")
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
# fmt: off
assert x == (
b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0\xd7\x95"
b"\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:]
)
# fmt: on
def test_compressed_stream():
gzipped_data = (
b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA=="
)
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b""
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding="binary") as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.mark.remote_data(source="astropy")
def test_invalid_location_download_raises_urlerror():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
with pytest.raises(urllib.error.URLError):
download_file("http://www.astropy.org/nonexistentfile")
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file("http://astropy.org/nonexistentfile")
@pytest.mark.remote_data(source="astropy")
def test_is_url_in_cache_remote():
assert not is_url_in_cache("http://astropy.org/nonexistentfile")
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_is_url_in_cache_local(temp_cache, valid_urls, invalid_urls):
testurl, contents = next(valid_urls)
nonexistent = next(invalid_urls)
assert not is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
download_file(testurl, cache=True, show_progress=False)
assert is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
# If non-deterministic failure happens see
# https://github.com/astropy/astropy/issues/9765
def test_check_download_cache(tmp_path, temp_cache, valid_urls, invalid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = tmp_path / "the.zip"
clear_download_cache()
assert not check_download_cache()
download_file(testurl, cache=True)
check_download_cache()
download_file(testurl2, cache=True)
check_download_cache()
export_download_cache(zip_file_name, [testurl, testurl2])
check_download_cache()
clear_download_cache(testurl2)
check_download_cache()
import_download_cache(zip_file_name, [testurl])
check_download_cache()
def test_export_import_roundtrip_one(tmp_path, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
f = download_file(testurl, cache=True, show_progress=False)
assert get_file_contents(f) == contents
initial_urls_in_cache = set(get_cached_urls())
zip_file_name = tmp_path / "the.zip"
export_download_cache(zip_file_name, [testurl])
clear_download_cache(testurl)
import_download_cache(zip_file_name)
assert is_url_in_cache(testurl)
assert set(get_cached_urls()) == initial_urls_in_cache
assert (
get_file_contents(download_file(testurl, cache=True, show_progress=False))
== contents
)
def test_export_url_not_present(temp_cache, valid_urls):
testurl, contents = next(valid_urls)
with NamedTemporaryFile("wb") as zip_file:
assert not is_url_in_cache(testurl)
with pytest.raises(KeyError):
export_download_cache(zip_file, [testurl])
def test_import_one(tmp_path, temp_cache, valid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = tmp_path / "the.zip"
download_file(testurl, cache=True)
download_file(testurl2, cache=True)
assert is_url_in_cache(testurl2)
export_download_cache(zip_file_name, [testurl, testurl2])
clear_download_cache(testurl)
clear_download_cache(testurl2)
import_download_cache(zip_file_name, [testurl])
assert is_url_in_cache(testurl)
assert not is_url_in_cache(testurl2)
def test_export_import_roundtrip(tmp_path, temp_cache, valid_urls):
zip_file_name = tmp_path / "the.zip"
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
export_download_cache(zip_file_name)
clear_download_cache()
import_download_cache(zip_file_name)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_import_roundtrip_stream(temp_cache, valid_urls):
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
clear_download_cache()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_overwrite_flag_works(temp_cache, valid_urls, tmp_path):
fn = tmp_path / "f.zip"
c = b"Some contents\nto check later"
with open(fn, "wb") as f:
f.write(c)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with pytest.raises(FileExistsError):
export_download_cache(fn)
assert get_file_contents(fn, encoding="binary") == c
export_download_cache(fn, overwrite=True)
assert get_file_contents(fn, encoding="binary") != c
def test_export_import_roundtrip_different_location(tmp_path, valid_urls):
original_cache = tmp_path / "original"
original_cache.mkdir()
zip_file_name = tmp_path / "the.zip"
urls = list(islice(valid_urls, FEW))
initial_urls_in_cache = {u for (u, c) in urls}
with paths.set_temp_cache(original_cache):
for u, c in urls:
download_file(u, cache=True)
assert set(get_cached_urls()) == initial_urls_in_cache
export_download_cache(zip_file_name)
new_cache = tmp_path / "new"
new_cache.mkdir()
with paths.set_temp_cache(new_cache):
import_download_cache(zip_file_name)
check_download_cache()
assert set(get_cached_urls()) == initial_urls_in_cache
for u, c in urls:
assert get_file_contents(download_file(u, cache=True)) == c
def test_cache_size_is_zero_when_empty(temp_cache):
assert not get_cached_urls()
assert cache_total_size() == 0
def test_cache_size_changes_correctly_when_files_are_added_and_removed(
temp_cache, valid_urls
):
u, c = next(valid_urls)
clear_download_cache(u)
s_i = cache_total_size()
download_file(u, cache=True)
assert cache_total_size() == s_i + len(c) + len(u.encode("utf-8"))
clear_download_cache(u)
assert cache_total_size() == s_i
def test_cache_contents_agrees_with_get_urls(temp_cache, valid_urls):
r = []
for a, a_c in islice(valid_urls, FEW):
a_f = download_file(a, cache=True)
r.append((a, a_c, a_f))
assert set(cache_contents().keys()) == set(get_cached_urls())
for u, c, h in r:
assert cache_contents()[u] == h
@pytest.mark.parametrize("desired_size", [1_000_000_000_000_000_000, 1 * _u.Ebyte])
def test_free_space_checker_huge(tmp_path, desired_size):
with pytest.raises(OSError):
check_free_space_in_dir(tmp_path, desired_size)
def test_get_free_space_file_directory(tmp_path):
fn = tmp_path / "file"
with open(fn, "w"):
pass
with pytest.raises(OSError):
get_free_space_in_dir(fn)
free_space = get_free_space_in_dir(tmp_path)
assert free_space > 0 and not hasattr(free_space, "unit")
# TODO: If unit=True starts to auto-guess prefix, this needs updating.
free_space = get_free_space_in_dir(tmp_path, unit=True)
assert free_space > 0 and free_space.unit == _u.byte
free_space = get_free_space_in_dir(tmp_path, unit=_u.Mbit)
assert free_space > 0 and free_space.unit == _u.Mbit
def test_download_file_bogus_settings(invalid_urls, temp_cache):
u = next(invalid_urls)
with pytest.raises(KeyError):
download_file(u, sources=[])
def test_download_file_local_directory(tmp_path):
"""Make sure we get a URLError rather than OSError even if it's a
local directory."""
with pytest.raises(urllib.request.URLError):
download_file(url_to(tmp_path))
def test_download_file_schedules_deletion(valid_urls):
u, c = next(valid_urls)
f = download_file(u)
assert f in _tempfilestodel
# how to test deletion actually occurs?
def test_clear_download_cache_refuses_to_delete_outside_the_cache(tmp_path):
fn = str(tmp_path / "file")
with open(fn, "w") as f:
f.write("content")
assert os.path.exists(fn)
with pytest.raises(RuntimeError):
clear_download_cache(fn)
assert os.path.exists(fn)
def test_check_download_cache_finds_bogus_entries(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf = os.path.abspath(os.path.join(dldir, "bogus"))
with open(bf, "w") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_finds_bogus_subentries(temp_cache, valid_urls):
u, c = next(valid_urls)
f = download_file(u, cache=True)
bf = os.path.abspath(os.path.join(os.path.dirname(f), "bogus"))
with open(bf, "w") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_cleanup(temp_cache, valid_urls):
u, c = next(valid_urls)
fn = download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf1 = os.path.abspath(os.path.join(dldir, "bogus1"))
with open(bf1, "w") as f:
f.write("bogus file that exists")
bf2 = os.path.abspath(os.path.join(os.path.dirname(fn), "bogus2"))
with open(bf2, "w") as f:
f.write("other bogus file that exists")
bf3 = os.path.abspath(os.path.join(dldir, "contents"))
with open(bf3, "w") as f:
f.write("awkwardly-named bogus file that exists")
u2, c2 = next(valid_urls)
f2 = download_file(u, cache=True)
os.unlink(f2)
bf4 = os.path.dirname(f2)
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert set(e.value.bad_files) == {bf1, bf2, bf3, bf4}
for bf in e.value.bad_files:
clear_download_cache(bf)
# download cache will be checked on exit
def test_download_cache_update_doesnt_damage_cache(temp_cache, valid_urls):
u, _ = next(valid_urls)
download_file(u, cache=True)
download_file(u, cache="update")
def test_cache_dir_is_actually_a_file(tmp_path, valid_urls):
"""Ensure that bogus cache settings are handled sensibly.
Because the user can specify the cache location in a config file, and
because they might try to deduce the location by looking around at what's
    in their directory tree, and because the cache directory is actually several
tree levels down from the directory set in the config file, it's important
to check what happens if each of the steps in the path is wrong somehow.
"""
def check_quietly_ignores_bogus_cache():
"""We want a broken cache to produce a warning but then astropy should
act like there isn't a cache.
"""
with pytest.warns(CacheMissingWarning):
assert not get_cached_urls()
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache("http://www.example.com/")
with pytest.warns(CacheMissingWarning):
assert not cache_contents()
with pytest.warns(CacheMissingWarning):
u, c = next(valid_urls)
r = download_file(u, cache=True)
assert get_file_contents(r) == c
# check the filename r appears in a warning message?
# check r is added to the delete_at_exit list?
# in fact should there be testing of the delete_at_exit mechanism,
# as far as that is possible?
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache(u)
with pytest.warns(CacheMissingWarning):
with pytest.raises(OSError):
check_download_cache()
dldir = _get_download_cache_loc()
# set_temp_cache acts weird if it is pointed at a file (see below)
# but we want to see what happens when the cache is pointed
# at a file instead of a directory, so make a directory we can
# replace later.
fn = tmp_path / "file"
ct = "contents\n"
os.mkdir(fn)
with paths.set_temp_cache(fn):
shutil.rmtree(fn)
with open(fn, "w") as f:
f.write(ct)
with pytest.raises(OSError):
paths.get_cache_dir()
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(fn) == ct, "File should not be harmed."
# See what happens when set_temp_cache is pointed at a file
with pytest.raises(OSError):
with paths.set_temp_cache(fn):
pass
assert dldir == _get_download_cache_loc()
assert get_file_contents(str(fn)) == ct
# Now the cache directory is normal but the subdirectory it wants
# to make is a file
cd = tmp_path / "astropy"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto one level deeper
os.makedirs(cd)
cd = tmp_path / "astropy" / "download"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto another level deeper
os.makedirs(cd)
cd = tmp_path / "astropy" / "download" / "url"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
def test_get_fileobj_str(a_file):
fn, c = a_file
with get_readable_fileobj(str(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_localpath(a_file):
fn, c = a_file
with get_readable_fileobj(py.path.local(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_pathlib(a_file):
fn, c = a_file
with get_readable_fileobj(pathlib.Path(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_binary(a_binary_file):
fn, c = a_binary_file
with get_readable_fileobj(fn, encoding="binary") as rf:
assert rf.read() == c
def test_get_fileobj_already_open_text(a_file):
fn, c = a_file
with open(fn) as f:
with get_readable_fileobj(f) as rf:
with pytest.raises(TypeError):
rf.read()
def test_get_fileobj_already_open_binary(a_file):
fn, c = a_file
with open(fn, "rb") as f:
with get_readable_fileobj(f) as rf:
assert rf.read() == c
def test_get_fileobj_binary_already_open_binary(a_binary_file):
fn, c = a_binary_file
with open(fn, "rb") as f:
with get_readable_fileobj(f, encoding="binary") as rf:
assert rf.read() == c
def test_cache_contents_not_writable(temp_cache, valid_urls):
c = cache_contents()
with pytest.raises(TypeError):
c["foo"] = 7
u, _ = next(valid_urls)
download_file(u, cache=True)
c = cache_contents()
assert u in c
with pytest.raises(TypeError):
c["foo"] = 7
def test_cache_relocatable(tmp_path, valid_urls):
u, c = next(valid_urls)
d1 = tmp_path / "1"
d2 = tmp_path / "2"
os.mkdir(d1)
with paths.set_temp_cache(d1):
p1 = download_file(u, cache=True)
assert is_url_in_cache(u)
assert get_file_contents(p1) == c
shutil.copytree(d1, d2)
clear_download_cache()
with paths.set_temp_cache(d2):
assert is_url_in_cache(u)
p2 = download_file(u, cache=True)
assert p1 != p2
assert os.path.exists(p2)
clear_download_cache(p2)
check_download_cache()
def test_get_readable_fileobj_cleans_up_temporary_files(tmp_path, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
url = url_to(TESTLOCAL)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, "tempdir", str(tmp_path))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url) as f:
f.read()
# Get listing of files in temporary directory
tempdir_listing = list(tmp_path.iterdir())
    # Assert that the temporary directory is empty after the
    # get_readable_fileobj() context manager has finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(TESTLOCAL)
with get_readable_fileobj(fpath) as f:
assert (
f.read().rstrip()
== "This file is used in the test_local_data_* testing functions\nCONTENT"
)
def test_nested_get_readable_fileobj():
"""Ensure fileobj state is as expected when get_readable_fileobj()
is called inside another get_readable_fileobj().
"""
with get_readable_fileobj(TESTLOCAL, encoding="binary") as fileobj:
with get_readable_fileobj(fileobj, encoding="UTF-8") as fileobj2:
fileobj2.seek(1)
fileobj.seek(1)
# Theoretically, fileobj2 should be closed already here but it is not.
# See https://github.com/astropy/astropy/pull/8675.
# UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.
# assert fileobj2.closed
assert fileobj.closed and fileobj2.closed
def test_download_file_wrong_size(monkeypatch):
@contextlib.contextmanager
def mockurl(remote_url, timeout=None):
yield MockURL()
def mockurl_builder(*args, tlscontext=None, **kwargs):
mock_opener = type("MockOpener", (object,), {})()
mock_opener.open = mockurl
return mock_opener
class MockURL:
def __init__(self):
self.reader = io.BytesIO(b"a" * real_length)
def info(self):
return {"Content-Length": str(report_length)}
def read(self, length=None):
return self.reader.read(length)
monkeypatch.setattr(astropy.utils.data, "_build_urlopener", mockurl_builder)
with pytest.raises(urllib.error.ContentTooShortError):
report_length = 1024
real_length = 1023
download_file(TESTURL, cache=False)
with pytest.raises(urllib.error.URLError):
report_length = 1023
real_length = 1024
download_file(TESTURL, cache=False)
report_length = 1023
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
report_length = None
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
def test_can_make_directories_readonly(tmp_path):
try:
with readonly_dir(tmp_path):
assert is_dir_readonly(tmp_path)
except AssertionError:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip(
"We are root, we can't make a directory un-writable with chmod."
)
elif platform.system() == "Windows":
pytest.skip(
"It seems we can't make a driectory un-writable under Windows "
"with chmod, in spite of the documentation."
)
else:
raise
def test_can_make_files_readonly(tmp_path):
fn = tmp_path / "test"
c = "contents\n"
with open(fn, "w") as f:
f.write(c)
with readonly_dir(tmp_path):
try:
with open(fn, "w+") as f:
f.write("more contents\n")
except PermissionError:
pass
else:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip("We are root, we can't make a file un-writable with chmod.")
assert get_file_contents(fn) == c
def test_read_cache_readonly(readonly_cache):
assert cache_contents() == readonly_cache
def test_download_file_cache_readonly(readonly_cache):
for u in readonly_cache:
f = download_file(u, cache=True)
assert f == readonly_cache[u]
def test_import_file_cache_readonly(readonly_cache, tmp_path):
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_readonly_cache_miss(readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert get_file_contents(f) == c
assert not is_url_in_cache(u)
def test_download_file_cache_readonly_update(readonly_cache):
for u in readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != readonly_cache[u]
assert compute_hash(f) == compute_hash(readonly_cache[u])
def test_check_download_cache_works_if_readonly(readonly_cache):
check_download_cache()
# On Windows I can't make directories readonly. On CircleCI I can't make
# anything readonly because the test suite runs as root. So on those platforms
# none of the "real" tests above can be run. I can use monkeypatch to trigger
# the readonly code paths, see the "fake" versions of the tests below, but I
# don't totally trust those to completely explore what happens either, so we
# have both. I couldn't see an easy way to parameterize over fixtures and share
# tests.
def test_read_cache_fake_readonly(fake_readonly_cache):
assert cache_contents() == fake_readonly_cache
def test_download_file_cache_fake_readonly(fake_readonly_cache):
for u in fake_readonly_cache:
f = download_file(u, cache=True)
assert f == fake_readonly_cache[u]
def test_mkdtemp_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
tempfile.mkdtemp()
def test_TD_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
with TemporaryDirectory():
pass
def test_import_file_cache_fake_readonly(fake_readonly_cache, tmp_path):
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_fake_readonly_cache_miss(fake_readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert not is_url_in_cache(u)
assert get_file_contents(f) == c
def test_download_file_cache_fake_readonly_update(fake_readonly_cache):
for u in fake_readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != fake_readonly_cache[u]
assert compute_hash(f) == compute_hash(fake_readonly_cache[u])
def test_check_download_cache_works_if_fake_readonly(fake_readonly_cache):
check_download_cache()
def test_pkgname_isolation(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True, pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() < cache_total_size(pkgname=a)
for u, _ in islice(valid_urls, FEW + 1):
download_file(u, cache=True)
assert len(get_cached_urls()) == FEW + 1
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() > cache_total_size(pkgname=a)
assert set(get_cached_urls()) == set(cache_contents().keys())
assert set(get_cached_urls(pkgname=a)) == set(cache_contents(pkgname=a).keys())
for i in get_cached_urls():
assert is_url_in_cache(i)
assert not is_url_in_cache(i, pkgname=a)
for i in get_cached_urls(pkgname=a):
assert not is_url_in_cache(i)
assert is_url_in_cache(i, pkgname=a)
# FIXME: need to break a cache to test whether we check the right one
check_download_cache()
check_download_cache(pkgname=a)
# FIXME: check that cache='update' works
u = get_cached_urls()[0]
with pytest.raises(KeyError):
download_file(u, cache=True, sources=[], pkgname=a)
clear_download_cache(u, pkgname=a)
assert len(get_cached_urls()) == FEW + 1, "wrong pkgname should do nothing"
assert len(get_cached_urls(pkgname=a)) == FEW, "wrong pkgname should do nothing"
f = download_file(u, sources=[], cache=True)
with pytest.raises(RuntimeError):
clear_download_cache(f, pkgname=a)
ua = get_cached_urls(pkgname=a)[0]
with pytest.raises(KeyError):
download_file(ua, cache=True, sources=[])
fa = download_file(ua, sources=[], cache=True, pkgname=a)
with pytest.raises(RuntimeError):
clear_download_cache(fa)
clear_download_cache(ua, pkgname=a)
assert len(get_cached_urls()) == FEW + 1
assert len(get_cached_urls(pkgname=a)) == FEW - 1
clear_download_cache(u)
assert len(get_cached_urls()) == FEW
assert len(get_cached_urls(pkgname=a)) == FEW - 1
clear_download_cache(pkgname=a)
assert len(get_cached_urls()) == FEW
assert not get_cached_urls(pkgname=a)
clear_download_cache()
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
def test_transport_cache_via_zip(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f, pkgname=a)
check_download_cache()
check_download_cache(pkgname=a)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
cca = cache_contents(pkgname=a)
for k, v in cache_contents().items():
assert v != cca[k]
assert get_file_contents(v) == get_file_contents(cca[k])
clear_download_cache()
with io.BytesIO() as f:
export_download_cache(f, pkgname=a)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
def test_download_parallel_respects_pkgname(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
download_files_in_parallel([u for (u, c) in islice(valid_urls, FEW)], pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
@pytest.mark.skipif(
not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.",
)
def test_removal_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
clear_download_cache(u)
assert not is_url_in_cache(u)
check_download_cache()
@pytest.mark.skipif(
not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.",
)
def test_update_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
f = download_file(u, cache="update", sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert is_url_in_cache(u)
def test_removal_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
# This platform is able to remove files while in use.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
clear_download_cache(u)
def test_update_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
# This platform is able to remove files while in use.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
f = download_file(u, cache="update", sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_no_allow_internet(temp_cache, valid_urls):
u, c = next(valid_urls)
with conf.set_temp("allow_internet", False):
with pytest.raises(urllib.error.URLError):
download_file(u)
assert not is_url_in_cache(u)
with pytest.raises(urllib.error.URLError):
# This will trigger the remote data error if it's allowed to touch the internet
download_file(TESTURL)
def test_clear_download_cache_not_too_aggressive(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bad_filename = os.path.join(dldir, "contents")
assert is_url_in_cache(u)
clear_download_cache(bad_filename)
assert is_url_in_cache(u)
def test_clear_download_cache_variants(temp_cache, valid_urls):
# deletion by contents filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(f)
assert not is_url_in_cache(u)
# deletion by url filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.join(os.path.dirname(f), "url"))
assert not is_url_in_cache(u)
# deletion by hash directory name
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f))
assert not is_url_in_cache(u)
# deletion by directory name with trailing slash
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f) + "/")
assert not is_url_in_cache(u)
# deletion by hash of file contents
u, c = next(valid_urls)
f = download_file(u, cache=True)
h = compute_hash(f)
clear_download_cache(h)
assert not is_url_in_cache(u)
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_ftp_tls_auto(temp_cache):
url = "ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.all" # noqa: E501
download_file(url)
@pytest.mark.parametrize("base", ["http://example.com", "https://example.com"])
def test_url_trailing_slash(temp_cache, valid_urls, base):
slash = base + "/"
no_slash = base
u, c = next(valid_urls)
download_file(slash, cache=True, sources=[u])
assert is_url_in_cache(no_slash)
download_file(no_slash, cache=True, sources=[])
clear_download_cache(no_slash)
assert not is_url_in_cache(no_slash)
assert not is_url_in_cache(slash)
download_file(no_slash, cache=True, sources=[u])
# see if implicit check_download_cache squawks
def test_empty_url(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file("file://", cache=True, sources=[u])
assert not is_url_in_cache("file:///")
@pytest.mark.remote_data
def test_download_ftp_file_properly_handles_socket_error():
faulty_url = "ftp://anonymous:mail%40astropy.org@nonexisting/pub/products/iers/finals2000A.all"
with pytest.raises(urllib.error.URLError) as excinfo:
download_file(faulty_url)
errmsg = excinfo.exconly()
found_msg = False
possible_msgs = [
"Name or service not known",
"nodename nor servname provided, or not known",
"getaddrinfo failed",
"Temporary failure in name resolution",
"No address associated with hostname",
]
for cur_msg in possible_msgs:
if cur_msg in errmsg:
found_msg = True
break
assert found_msg, f'Got {errmsg}, expected one of these: {",".join(possible_msgs)}'
@pytest.mark.parametrize(
("s", "ans"),
[
("http://googlecom", True),
("https://google.com", True),
("ftp://google.com", True),
("sftp://google.com", True),
("ssh://google.com", True),
("file:///c:/path/to/the%20file.txt", True),
("google.com", False),
("C:\\\\path\\\\file.docx", False),
("data://file", False),
],
)
def test_string_is_url_check(s, ans):
assert is_url(s) is ans
99f052829d5d271a90add8ae9b8ec467bff45be57cf6e85e90eb902a7d10228d
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Helpers for letting numpy functions interact with Masked arrays.
The module supplies helper routines for numpy functions that propagate
masks appropriately, for use in the ``__array_function__``
implementation of `~astropy.utils.masked.MaskedNDArray`. They are not
very useful on their own, but the ones with docstrings are included in
the documentation so that there is a place to find out how the mask is
interpreted.
"""
import numpy as np
from astropy.units.quantity_helper.function_helpers import FunctionAssigner
from astropy.utils.compat import NUMPY_LT_1_23
# This module should not really be imported, but we define __all__
# such that sphinx can typeset the functions with docstrings.
# The latter are added to __all__ at the end.
__all__ = [
"MASKED_SAFE_FUNCTIONS",
"APPLY_TO_BOTH_FUNCTIONS",
"DISPATCHED_FUNCTIONS",
"UNSUPPORTED_FUNCTIONS",
]
MASKED_SAFE_FUNCTIONS = set()
"""Set of functions that work fine on Masked classes already.
Most of these internally use `numpy.ufunc` or other functions that
are already covered.
"""
APPLY_TO_BOTH_FUNCTIONS = {}
"""Dict of functions that should apply to both data and mask.
The `dict` is keyed by the numpy function and the values are functions
that take the input arguments of the numpy function and organize these
for passing the data and mask to the numpy function.
Returns
-------
data_args : tuple
Arguments to pass on to the numpy function for the unmasked data.
mask_args : tuple
Arguments to pass on to the numpy function for the masked data.
kwargs : dict
Keyword arguments to pass on for both unmasked data and mask.
out : `~astropy.utils.masked.Masked` instance or None
Optional instance in which to store the output.
Raises
------
NotImplementedError
    When an argument is masked when it should not be or vice versa.
"""
DISPATCHED_FUNCTIONS = {}
"""Dict of functions that provide the numpy function's functionality.
These are for more complicated versions where the numpy function itself
cannot easily be used. It should return either the result of the
function, or a tuple consisting of the unmasked result, the mask for the
result and a possible output instance.
It should raise `NotImplementedError` if one of the arguments is masked
when it should not be or vice versa.
"""
UNSUPPORTED_FUNCTIONS = set()
"""Set of numpy functions that are not supported for masked arrays.
For most, masked input simply makes no sense, but for others it may have
been lack of time. Issues or PRs for support for functions are welcome.
"""
# Almost all from np.core.fromnumeric defer to methods so are OK.
MASKED_SAFE_FUNCTIONS |= {
getattr(np, name)
for name in np.core.fromnumeric.__all__
if name not in {"choose", "put", "resize", "searchsorted", "where", "alen"}
}
MASKED_SAFE_FUNCTIONS |= {
# built-in from multiarray
np.may_share_memory, np.can_cast, np.min_scalar_type, np.result_type,
np.shares_memory,
# np.core.arrayprint
np.array_repr,
# np.core.function_base
np.linspace, np.logspace, np.geomspace,
# np.core.numeric
np.isclose, np.allclose, np.flatnonzero, np.argwhere,
# np.core.shape_base
np.atleast_1d, np.atleast_2d, np.atleast_3d, np.stack, np.hstack, np.vstack,
# np.lib.function_base
np.average, np.diff, np.extract, np.meshgrid, np.trapz, np.gradient,
# np.lib.index_tricks
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.fill_diagonal,
# np.lib.shape_base
np.column_stack, np.row_stack, np.dstack,
np.array_split, np.split, np.hsplit, np.vsplit, np.dsplit,
np.expand_dims, np.apply_along_axis, np.kron, np.tile,
np.take_along_axis, np.put_along_axis,
# np.lib.type_check (all but asfarray, nan_to_num)
np.iscomplexobj, np.isrealobj, np.imag, np.isreal, np.real,
np.real_if_close, np.common_type,
# np.lib.ufunclike
np.fix, np.isneginf, np.isposinf,
# np.lib.function_base
np.angle, np.i0,
} # fmt: skip
IGNORED_FUNCTIONS = {
# I/O - useless for Masked, since no way to store the mask.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
IGNORED_FUNCTIONS |= {
np.pad, np.searchsorted, np.digitize,
np.is_busday, np.busday_count, np.busday_offset,
# numpy.lib.function_base
np.cov, np.corrcoef, np.trim_zeros,
# numpy.core.numeric
np.correlate, np.convolve,
# numpy.lib.histograms
np.histogram, np.histogram2d, np.histogramdd, np.histogram_bin_edges,
# TODO!!
np.dot, np.vdot, np.inner, np.tensordot, np.cross,
np.einsum, np.einsum_path,
} # fmt: skip
# Really should do these...
IGNORED_FUNCTIONS |= {
getattr(np, setopsname) for setopsname in np.lib.arraysetops.__all__
}
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
# Explicitly unsupported functions
UNSUPPORTED_FUNCTIONS |= {
np.unravel_index,
np.ravel_multi_index,
np.ix_,
}
# No support for the functions also not supported by Quantity
# (io, polynomial, etc.).
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
apply_to_both = FunctionAssigner(APPLY_TO_BOTH_FUNCTIONS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
def _get_data_and_masks(*args):
"""Separate out arguments into tuples of data and masks.
An all-False mask is created if an argument does not have a mask.
"""
from .core import Masked
data, masks = Masked._get_data_and_masks(*args)
masks = tuple(
m if m is not None else np.zeros(np.shape(d), bool) for d, m in zip(data, masks)
)
return data, masks
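# Illustrative sketch: ``_get_data_and_masks(Masked([1, 2], mask=[True, False]), 3)``
# returns data ``(array([1, 2]), 3)`` and masks ``(array([ True, False]), array(False))``;
# the unmasked scalar ``3`` gets an all-False zero-dimensional mask.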
# Following are simple ufunc-like functions which should just copy the mask.
@dispatched_function
def datetime_as_string(arr, *args, **kwargs):
return (np.datetime_as_string(arr.unmasked, *args, **kwargs), arr.mask.copy(), None)
@dispatched_function
def sinc(x):
return np.sinc(x.unmasked), x.mask.copy(), None
@dispatched_function
def iscomplex(x):
return np.iscomplex(x.unmasked), x.mask.copy(), None
@dispatched_function
def unwrap(p, *args, **kwargs):
return np.unwrap(p.unmasked, *args, **kwargs), p.mask.copy(), None
@dispatched_function
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
data = np.nan_to_num(x.unmasked, copy=copy, nan=nan, posinf=posinf, neginf=neginf)
return (data, x.mask.copy(), None) if copy else x
# Following are simple functions related to shapes, where the same function
# should be applied to the data and the mask. They cannot all share the
# same helper, because the first arguments have different names.
@apply_to_both(
helps={np.copy, np.asfarray, np.resize, np.moveaxis, np.rollaxis, np.roll}
)
def masked_a_helper(a, *args, **kwargs):
data, mask = _get_data_and_masks(a)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.flip, np.flipud, np.fliplr, np.rot90, np.triu, np.tril})
def masked_m_helper(m, *args, **kwargs):
data, mask = _get_data_and_masks(m)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.diag, np.diagflat})
def masked_v_helper(v, *args, **kwargs):
data, mask = _get_data_and_masks(v)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.delete})
def masked_arr_helper(array, *args, **kwargs):
data, mask = _get_data_and_masks(array)
return data + args, mask + args, kwargs, None
@apply_to_both
def broadcast_to(array, shape, subok=False):
"""Broadcast array to the given shape.
Like `numpy.broadcast_to`, and applied to both unmasked data and mask.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and mask are allowed, i.e., for ``subok=False``,
a `~astropy.utils.masked.MaskedNDArray` will be returned.
"""
data, mask = _get_data_and_masks(array)
return data, mask, dict(shape=shape, subok=subok), None
@dispatched_function
def outer(a, b, out=None):
return np.multiply.outer(np.ravel(a), np.ravel(b), out=out)
@dispatched_function
def empty_like(prototype, dtype=None, order="K", subok=True, shape=None):
"""Return a new array with the same shape and type as a given array.
Like `numpy.empty_like`, but will add an empty mask.
"""
unmasked = np.empty_like(
prototype.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
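    # For the mask, translate any requested dtype into the matching mask dtype:
    # a structured mask descriptor if the data dtype has fields, plain bool otherwise.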
if dtype is not None:
dtype = (
np.ma.make_mask_descr(unmasked.dtype)
if unmasked.dtype.names
else np.dtype("?")
)
mask = np.empty_like(
prototype.mask, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, mask, None
@dispatched_function
def zeros_like(a, dtype=None, order="K", subok=True, shape=None):
"""Return an array of zeros with the same shape and type as a given array.
Like `numpy.zeros_like`, but will add an all-false mask.
"""
unmasked = np.zeros_like(
a.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, False, None
@dispatched_function
def ones_like(a, dtype=None, order="K", subok=True, shape=None):
"""Return an array of ones with the same shape and type as a given array.
Like `numpy.ones_like`, but will add an all-false mask.
"""
unmasked = np.ones_like(
a.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, False, None
@dispatched_function
def full_like(a, fill_value, dtype=None, order="K", subok=True, shape=None):
"""Return a full array with the same shape and type as a given array.
Like `numpy.full_like`, but with a mask that is also set.
If ``fill_value`` is `numpy.ma.masked`, the data will be left unset
(i.e., as created by `numpy.empty_like`).
"""
result = np.empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
result[...] = fill_value
return result
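# A usage sketch (assuming ``ma`` is a float MaskedNDArray) of the ``np.ma.masked``
# fill behaviour described above:
#
#     ma = Masked(np.arange(3.0), mask=[False, True, False])
#     filled = np.full_like(ma, np.ma.masked)
#     assert filled.mask.all()   # every element is masked; the data are left unset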
@dispatched_function
def put(a, ind, v, mode="raise"):
"""Replaces specified elements of an array with given values.
Like `numpy.put`, but for masked array ``a`` and possibly masked
value ``v``. Masked indices ``ind`` are not supported.
"""
from astropy.utils.masked import Masked
if isinstance(ind, Masked) or not isinstance(a, Masked):
raise NotImplementedError
v_data, v_mask = a._get_data_and_mask(v)
if v_data is not None:
np.put(a.unmasked, ind, v_data, mode=mode)
# v_mask of None will be correctly interpreted as False.
np.put(a.mask, ind, v_mask, mode=mode)
return None
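# A usage sketch (illustrative): for a MaskedNDArray ``a``,
#
#     np.put(a, [0], Masked(5, mask=True))
#
# writes 5 into ``a.unmasked[0]`` and sets ``a.mask[0]`` to True.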
@dispatched_function
def putmask(a, mask, values):
"""Changes elements of an array based on conditional and input values.
Like `numpy.putmask`, but for masked array ``a`` and possibly masked
``values``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(a, Masked):
raise NotImplementedError
values_data, values_mask = a._get_data_and_mask(values)
if values_data is not None:
np.putmask(a.unmasked, mask, values_data)
np.putmask(a.mask, mask, values_mask)
return None
@dispatched_function
def place(arr, mask, vals):
"""Change elements of an array based on conditional and input values.
    Like `numpy.place`, but for masked array ``arr`` and possibly masked
    ``vals``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
vals_data, vals_mask = arr._get_data_and_mask(vals)
if vals_data is not None:
np.place(arr.unmasked, mask, vals_data)
np.place(arr.mask, mask, vals_mask)
return None
@dispatched_function
def copyto(dst, src, casting="same_kind", where=True):
"""Copies values from one array to another, broadcasting as necessary.
Like `numpy.copyto`, but for masked destination ``dst`` and possibly
masked source ``src``.
"""
from astropy.utils.masked import Masked
if not isinstance(dst, Masked) or isinstance(where, Masked):
raise NotImplementedError
src_data, src_mask = dst._get_data_and_mask(src)
if src_data is not None:
np.copyto(dst.unmasked, src_data, casting=casting, where=where)
if src_mask is not None:
np.copyto(dst.mask, src_mask, where=where)
return None
@dispatched_function
def packbits(a, *args, **kwargs):
result = np.packbits(a.unmasked, *args, **kwargs)
mask = np.packbits(a.mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def unpackbits(a, *args, **kwargs):
result = np.unpackbits(a.unmasked, *args, **kwargs)
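    # Expand the mask bytewise: a masked input byte is mapped to 255 so that all
    # eight of its unpacked bits come out masked.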
mask = np.zeros(a.shape, dtype="u1")
mask[a.mask] = 255
mask = np.unpackbits(mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def bincount(x, weights=None, minlength=0):
"""Count number of occurrences of each value in array of non-negative ints.
Like `numpy.bincount`, but masked entries in ``x`` will be skipped.
    Any masked entries in ``weights`` will cause the corresponding bin to
    be masked.
"""
from astropy.utils.masked import Masked
if weights is not None:
weights = np.asanyarray(weights)
if isinstance(x, Masked) and x.ndim <= 1:
# let other dimensions lead to errors.
if weights is not None and weights.ndim == x.ndim:
weights = weights[~x.mask]
x = x.unmasked[~x.mask]
mask = None
if weights is not None:
weights, w_mask = Masked._get_data_and_mask(weights)
if w_mask is not None:
mask = np.bincount(x, w_mask.astype(int), minlength=minlength).astype(bool)
    result = np.bincount(x, weights, minlength=minlength)
return result, mask, None
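# Illustrative sketch of the behaviour documented above (example values are
# assumptions; ``Masked`` is `astropy.utils.masked.Masked`): masked entries in
# ``x`` are skipped, while a masked weight masks the corresponding bin.
#
#     >>> x = Masked([1, 1, 2], mask=[False, True, False])
#     >>> np.bincount(x)                       # counts only the unmasked entries
#     >>> w = Masked([1., 1., 1.], mask=[True, False, False])
#     >>> np.bincount([1, 1, 2], weights=w)    # bin 1 comes out masked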
@dispatched_function
def msort(a):
result = a.copy()
result.sort(axis=0)
return result
@dispatched_function
def sort_complex(a):
# Just a copy of function_base.sort_complex, to avoid the asarray.
b = a.copy()
b.sort()
if not issubclass(b.dtype.type, np.complexfloating): # pragma: no cover
if b.dtype.char in "bhBH":
return b.astype("F")
elif b.dtype.char == "g":
return b.astype("G")
else:
return b.astype("D")
else:
return b
@dispatched_function
def concatenate(arrays, axis=0, out=None, dtype=None, casting="same_kind"):
data, masks = _get_data_and_masks(*arrays)
if out is None:
return (
np.concatenate(data, axis=axis, dtype=dtype, casting=casting),
np.concatenate(masks, axis=axis),
None,
)
else:
from astropy.utils.masked import Masked
if not isinstance(out, Masked):
raise NotImplementedError
np.concatenate(masks, out=out.mask, axis=axis)
np.concatenate(data, out=out.unmasked, axis=axis, dtype=dtype, casting=casting)
return out
@apply_to_both
def append(arr, values, axis=None):
data, masks = _get_data_and_masks(arr, values)
return data, masks, dict(axis=axis), None
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
from astropy.utils.masked import Masked
arrays, list_ndim, result_ndim, final_size = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim
)
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags["F_CONTIGUOUS"] for arr in arrays)
C_order = all(arr.flags["C_CONTIGUOUS"] for arr in arrays)
order = "F" if F_order and not C_order else "C"
result = Masked(np.empty(shape=shape, dtype=dtype, order=order))
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
@dispatched_function
def broadcast_arrays(*args, subok=True):
"""Broadcast arrays to a common shape.
Like `numpy.broadcast_arrays`, applied to both unmasked data and masks.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and masks are allowed, i.e., for ``subok=False``,
`~astropy.utils.masked.MaskedNDArray` instances will be returned.
"""
from .core import Masked
are_masked = [isinstance(arg, Masked) for arg in args]
data = [
(arg.unmasked if is_masked else arg) for arg, is_masked in zip(args, are_masked)
]
results = np.broadcast_arrays(*data, subok=subok)
shape = results[0].shape if isinstance(results, list) else results.shape
masks = [
(np.broadcast_to(arg.mask, shape, subok=subok) if is_masked else None)
for arg, is_masked in zip(args, are_masked)
]
results = [
(Masked(result, mask) if mask is not None else result)
for (result, mask) in zip(results, masks)
]
return results if len(results) > 1 else results[0]
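# Illustrative sketch (example values are assumptions; ``Masked`` is
# `astropy.utils.masked.Masked`): unmasked inputs come back as plain arrays,
# masked inputs as Masked arrays with broadcast masks.
#
#     >>> a = Masked([1., 2., 3.], mask=[True, False, False])
#     >>> b = np.array([[1.], [2.]])
#     >>> oa, ob = np.broadcast_arrays(a, b)
#     >>> # oa is Masked with shape (2, 3); ob is a plain ndarray of shape (2, 3)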
@apply_to_both
def insert(arr, obj, values, axis=None):
"""Insert values along the given axis before the given indices.
Like `numpy.insert` but for possibly masked ``arr`` and ``values``.
Masked ``obj`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(obj, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
(arr_data, val_data), (arr_mask, val_mask) = _get_data_and_masks(arr, values)
return ((arr_data, obj, val_data, axis), (arr_mask, obj, val_mask, axis), {}, None)
@dispatched_function
def count_nonzero(a, axis=None, *, keepdims=False):
"""Counts the number of non-zero values in the array ``a``.
Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.
"""
filled = a.filled(np.zeros((), a.dtype))
return np.count_nonzero(filled, axis, keepdims=keepdims)
def _masked_median_1d(a, overwrite_input):
# TODO: need an in-place mask-sorting option.
unmasked = a.unmasked[~a.mask]
if unmasked.size:
return a.from_unmasked(np.median(unmasked, overwrite_input=overwrite_input))
else:
return a.from_unmasked(np.zeros_like(a.unmasked, shape=(1,))[0], mask=True)
def _masked_median(a, axis=None, out=None, overwrite_input=False):
# As for np.nanmedian, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_median_1d(part, overwrite_input)
else:
result = np.apply_along_axis(_masked_median_1d, axis, a, overwrite_input)
if out is not None:
out[...] = result
return result
@dispatched_function
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
from astropy.utils.masked import Masked
if out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
r, k = np.lib.function_base._ureduce(
a, func=_masked_median, axis=axis, out=out, overwrite_input=overwrite_input
)
return (r.reshape(k) if keepdims else r) if out is None else out
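# Illustrative sketch (example values are assumptions; ``Masked`` is
# `astropy.utils.masked.Masked`): masked entries are ignored, and a fully
# masked slice yields a masked element rather than a warning or NaN.
#
#     >>> ma = Masked([1., 2., 1000.], mask=[False, False, True])
#     >>> np.median(ma)        # 1.5, computed from the unmasked entries only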
def _masked_quantile_1d(a, q, **kwargs):
"""
    Private function for rank-1 arrays. Compute the quantile ignoring masked values.
    See `numpy.nanpercentile` for parameter usage.
"""
unmasked = a.unmasked[~a.mask]
if unmasked.size:
result = np.lib.function_base._quantile_unchecked(unmasked, q, **kwargs)
return a.from_unmasked(result)
else:
return a.from_unmasked(np.zeros_like(a.unmasked, shape=q.shape), True)
def _masked_quantile(a, q, axis=None, out=None, **kwargs):
    # As for np.nanquantile, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_quantile_1d(part, q, **kwargs)
else:
result = np.apply_along_axis(_masked_quantile_1d, axis, a, q, **kwargs)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.moveaxis(result, axis, 0)
if out is not None:
out[...] = result
return result
@dispatched_function
def quantile(a, q, axis=None, out=None, **kwargs):
from astropy.utils.masked import Masked
if isinstance(q, Masked) or out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
q = np.asanyarray(q)
if not np.lib.function_base._quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
keepdims = kwargs.pop("keepdims", False)
r, k = np.lib.function_base._ureduce(
a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs
)
return (r.reshape(k) if keepdims else r) if out is None else out
@dispatched_function
def percentile(a, q, *args, **kwargs):
q = np.true_divide(q, 100)
return quantile(a, q, *args, **kwargs)
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
(a1d, a2d), (a1m, a2m) = _get_data_and_masks(a1, a2)
if a1d.shape != a2d.shape:
return False
equal = a1d == a2d
if equal_nan:
equal |= np.isnan(a1d) & np.isnan(a2d)
return bool((equal | a1m | a2m).all())
@dispatched_function
def array_equiv(a1, a2):
return bool((a1 == a2).all())
@dispatched_function
def where(condition, *args):
from astropy.utils.masked import Masked
if not args:
return condition.nonzero(), None, None
condition, c_mask = Masked._get_data_and_mask(condition)
data, masks = _get_data_and_masks(*args)
unmasked = np.where(condition, *data)
mask = np.where(condition, *masks)
if c_mask is not None:
mask |= c_mask
return Masked(unmasked, mask=mask)
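# Illustrative sketch (example values are assumptions): a masked entry in
# ``condition`` masks the corresponding output element, whichever branch was
# selected.
#
#     >>> cond = Masked([True, False, True], mask=[True, False, False])
#     >>> np.where(cond, Masked([1., 2., 3.], mask=[False, False, True]), 0.)
#     >>> # data: [1., 0., 3.];  mask: [True, False, True]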
@dispatched_function
def choose(a, choices, out=None, mode="raise"):
"""Construct an array from an index array and a set of arrays to choose from.
Like `numpy.choose`. Masked indices in ``a`` will lead to masked output
values and underlying data values are ignored if out of bounds (for
``mode='raise'``). Any values masked in ``choices`` will be propagated
if chosen.
"""
from astropy.utils.masked import Masked
a_data, a_mask = Masked._get_data_and_mask(a)
if a_mask is not None and mode == "raise":
# Avoid raising on masked indices.
a_data = a.filled(fill_value=0)
kwargs = {"mode": mode}
if out is not None:
if not isinstance(out, Masked):
raise NotImplementedError
kwargs["out"] = out.unmasked
data, masks = _get_data_and_masks(*choices)
data_chosen = np.choose(a_data, data, **kwargs)
if out is not None:
kwargs["out"] = out.mask
mask_chosen = np.choose(a_data, masks, **kwargs)
if a_mask is not None:
mask_chosen |= a_mask
return Masked(data_chosen, mask_chosen) if out is None else out
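# Illustrative sketch (example values are assumptions): masked indices give
# masked output elements, and masked entries in the chosen arrays propagate.
#
#     >>> idx = Masked([0, 1], mask=[True, False])
#     >>> np.choose(idx, (Masked([1, 2], mask=[False, True]), [10, 20]))
#     >>> # data: [1, 20];  mask: [True, False]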
@apply_to_both
def select(condlist, choicelist, default=0):
"""Return an array drawn from elements in choicelist, depending on conditions.
    Like `numpy.select`, but with masks in ``choicelist`` propagated.
Any masks in ``condlist`` are ignored.
"""
from astropy.utils.masked import Masked
condlist = [c.unmasked if isinstance(c, Masked) else c for c in condlist]
data_list, mask_list = _get_data_and_masks(*choicelist)
default = Masked(default) if default is not np.ma.masked else Masked(0, mask=True)
return (
(condlist, data_list, default.unmasked),
(condlist, mask_list, default.mask),
{},
None,
)
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
"""Evaluate a piecewise-defined function.
Like `numpy.piecewise` but for masked input array ``x``.
Any masks in ``condlist`` are ignored.
"""
# Copied implementation from numpy.lib.function_base.piecewise,
# just to ensure output is Masked.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0
): # pragma: no cover
condlist = [condlist]
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
# The one real change...
y = np.zeros_like(x)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
for item, value in zip(where, what):
y[item] = value
return y
@dispatched_function
def interp(x, xp, fp, *args, **kwargs):
"""One-dimensional linear interpolation.
Like `numpy.interp`, but any masked points in ``xp`` and ``fp``
are ignored. Any masked values in ``x`` will still be evaluated,
but masked on output.
"""
from astropy.utils.masked import Masked
xd, xm = Masked._get_data_and_mask(x)
if isinstance(xp, Masked) or isinstance(fp, Masked):
(xp, fp), (xpm, fpm) = _get_data_and_masks(xp, fp)
if xp.ndim == fp.ndim == 1:
            # Only filter out masked points for 1-D arrays; otherwise leave
            # the arrays untouched and let np.interp raise below.
m = xpm | fpm
xp = xp[~m]
fp = fp[~m]
result = np.interp(xd, xp, fp, *args, **kwargs)
return result if xm is None else Masked(result, xm.copy())
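# Illustrative sketch (example values are assumptions): masked points in
# ``xp``/``fp`` are dropped before interpolating, while masked ``x`` values
# are evaluated but masked in the output.
#
#     >>> xp = np.arange(5.)
#     >>> fp = Masked([1., 5., 6., 19., 20.], mask=[False, False, False, True, False])
#     >>> x = Masked([1.5, 17.], mask=[False, True])
#     >>> np.interp(x, xp, fp)   # ignores the masked (xp, fp) point; the
#     ...                        # second output element is masked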
@dispatched_function
def lexsort(keys, axis=-1):
"""Perform an indirect stable sort using a sequence of keys.
Like `numpy.lexsort` but for possibly masked ``keys``. Masked
values are sorted towards the end for each key.
"""
# Sort masks to the end.
from .core import Masked
new_keys = []
for key in keys:
if isinstance(key, Masked):
# If there are other keys below, want to be sure that
# for masked values, those other keys set the order.
new_key = key.unmasked
if new_keys and key.mask.any():
new_key = new_key.copy()
new_key[key.mask] = new_key.flat[0]
new_keys.extend([new_key, key.mask])
else:
new_keys.append(key)
return np.lexsort(new_keys, axis=axis)
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is Masked.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError(
"function is not returning an array of the correct shape"
)
return val
class MaskedFormat:
"""Formatter for masked array scalars.
For use in `numpy.array2string`, wrapping the regular formatters such
that if a value is masked, its formatted string is replaced.
Typically initialized using the ``from_data`` class method.
"""
def __init__(self, format_function):
self.format_function = format_function
# Special case for structured void and subarray: we need to make all the
# format functions for the items masked as well.
        # TODO: maybe a separate class is more logical?
ffs = getattr(format_function, "format_functions", None)
if ffs:
# StructuredVoidFormat: multiple format functions to be changed.
self.format_function.format_functions = [MaskedFormat(ff) for ff in ffs]
ff = getattr(format_function, "format_function", None)
if ff:
# SubarrayFormat: change format function for the elements.
self.format_function.format_function = MaskedFormat(ff)
def __call__(self, x):
if x.dtype.names:
# The replacement of x with a list is needed because the function
# inside StructuredVoidFormat iterates over x, which works for an
# np.void but not an array scalar.
return self.format_function([x[field] for field in x.dtype.names])
if x.shape:
# For a subarray pass on the data directly, since the
# items will be iterated on inside the function.
return self.format_function(x)
# Single element: first just typeset it normally, replace with masked
# string if needed.
string = self.format_function(x.unmasked[()])
if x.mask:
            # Strikethrough would be neat, but the terminal needs different
            # formatting than, say, a Jupyter notebook.
# return "\x1B[9m"+string+"\x1B[29m"
# return ''.join(s+'\u0336' for s in string)
n = min(3, max(1, len(string)))
return " " * (len(string) - n) + "\u2014" * n
else:
return string
@classmethod
def from_data(cls, data, **options):
from numpy.core.arrayprint import _get_format_function
return cls(_get_format_function(data, **options))
def _array2string(a, options, separator=" ", prefix=""):
# Mostly copied from numpy.core.arrayprint, except:
# - The format function is wrapped in a mask-aware class;
    # - Array scalars are not cast as arrays.
from numpy.core.arrayprint import _formatArray, _leading_trailing
data = np.asarray(a)
if a.size > options["threshold"]:
summary_insert = "..."
data = _leading_trailing(data, options["edgeitems"])
else:
summary_insert = ""
# find the right formatting function for the array
format_function = MaskedFormat.from_data(data, **options)
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " " * len(prefix)
lst = _formatArray(
a,
format_function,
options["linewidth"],
next_line_prefix,
separator,
options["edgeitems"],
summary_insert,
options["legacy"],
)
return lst
@dispatched_function
def array2string(
a,
max_line_width=None,
precision=None,
suppress_small=None,
separator=" ",
prefix="",
style=np._NoValue,
formatter=None,
threshold=None,
edgeitems=None,
sign=None,
floatmode=None,
suffix="",
):
# Copied from numpy.core.arrayprint, but using _array2string above.
from numpy.core.arrayprint import _format_options, _make_options_dict
overrides = _make_options_dict(
precision,
threshold,
edgeitems,
max_line_width,
suppress_small,
None,
None,
sign,
formatter,
floatmode,
)
options = _format_options.copy()
options.update(overrides)
options["linewidth"] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
@dispatched_function
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
# Override to avoid special treatment of array scalars.
return array2string(a, max_line_width, precision, suppress_small, " ", "")
# For the nanfunctions, we just treat any nan as an additional mask.
_nanfunc_fill_values = {"nansum": 0, "nancumsum": 0, "nanprod": 1, "nancumprod": 1}
def masked_nanfunc(nanfuncname):
np_func = getattr(np, nanfuncname[3:])
fill_value = _nanfunc_fill_values.get(nanfuncname, None)
def nanfunc(a, *args, **kwargs):
from astropy.utils.masked import Masked
a, mask = Masked._get_data_and_mask(a)
if issubclass(a.dtype.type, np.inexact):
nans = np.isnan(a)
mask = nans if mask is None else (nans | mask)
if mask is not None:
a = Masked(a, mask)
if fill_value is not None:
a = a.filled(fill_value)
return np_func(a, *args, **kwargs)
doc = f"Like `numpy.{nanfuncname}`, skipping masked values as well.\n\n"
if fill_value is not None:
# sum, cumsum, prod, cumprod
doc += (
f"Masked/NaN values are replaced with {fill_value}. "
"The output is not masked."
)
elif "arg" in nanfuncname:
doc += (
"No exceptions are raised for fully masked/NaN slices.\n"
"Instead, these give index 0."
)
else:
doc += (
"No warnings are given for fully masked/NaN slices.\n"
"Instead, they are masked in the output."
)
nanfunc.__doc__ = doc
nanfunc.__name__ = nanfuncname
return nanfunc
for nanfuncname in np.lib.nanfunctions.__all__:
globals()[nanfuncname] = dispatched_function(
masked_nanfunc(nanfuncname), helps=getattr(np, nanfuncname)
)
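# Illustrative sketch of the wrappers generated above (example values are
# assumptions): NaN is treated as an extra mask, so e.g. np.nansum returns a
# plain filled result while np.nanmean keeps the result masked.
#
#     >>> ma = Masked([np.nan, 1., 2.], mask=[False, True, False])
#     >>> np.nansum(ma)     # 2.0, plain (NaN and masked entries filled with 0)
#     >>> np.nanmean(ma)    # 2.0, still a Masked scalar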
# Add any dispatched or helper function that has a docstring to
# __all__, so they will be typeset by sphinx. The logic is that, for those,
# the use of the mask is presumably not entirely obvious.
__all__ += sorted(
helper.__name__
for helper in (
set(APPLY_TO_BOTH_FUNCTIONS.values()) | set(DISPATCHED_FUNCTIONS.values())
)
if helper.__doc__
)
|
f773e131f3ba01cf54b72f22457716cb6b24f234c37854c2733fc46f234a3f13 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
import numpy as np
from astropy.utils import minversion
__all__ = [
"NUMPY_LT_1_21_1",
"NUMPY_LT_1_22",
"NUMPY_LT_1_22_1",
"NUMPY_LT_1_23",
"NUMPY_LT_1_24",
]
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_21_1 = not minversion(np, "1.21.1")
NUMPY_LT_1_22 = not minversion(np, "1.22")
NUMPY_LT_1_22_1 = not minversion(np, "1.22.1")
NUMPY_LT_1_23 = not minversion(np, "1.23")
NUMPY_LT_1_24 = not minversion(np, "1.24dev0")
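# Illustrative sketch of how these flags are typically consumed elsewhere in
# astropy (the gated code below is a placeholder, not real logic):
#
#     from astropy.utils.compat import NUMPY_LT_1_23
#     if NUMPY_LT_1_23:
#         ...  # fall back to behaviour needed for older numpy versions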
|
1da1c02e839d922829728f81558da3f1978f36ec0bf83b14ea717bd466058e52 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test all functions covered by __array_function__.
Here, run through all functions, with simple tests just to check the helpers.
More complicated tests of functionality, including with subclasses, are done
in test_functions.
TODO: finish full coverage (see also `~astropy.utils.masked.function_helpers`)
- np.linalg
- np.fft (is there any point?)
- np.lib.nanfunctions
"""
import inspect
import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy.units.tests.test_quantity_non_ufuncs import get_wrapped_functions
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.masked.function_helpers import (
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
IGNORED_FUNCTIONS,
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .test_masked import MaskedArraySetup, assert_masked_equal
all_wrapped_functions = get_wrapped_functions(np)
all_wrapped = set(all_wrapped_functions.values())
class BasicTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = Masked(
func(self.a, *args, **kwargs), mask=func(self.mask_a, *args, **kwargs)
)
assert_masked_equal(out, expected)
def check2(self, func, *args, **kwargs):
out = func(self.ma, self.mb, *args, **kwargs)
expected = Masked(
func(self.a, self.b, *args, **kwargs),
mask=func(self.mask_a, self.mask_b, *args, **kwargs),
)
if isinstance(out, (tuple, list)):
for o, x in zip(out, expected):
assert_masked_equal(o, x)
else:
assert_masked_equal(out, expected)
class NoMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o, expected)
class InvariantMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, self.mask_a)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.ma) == (2, 3)
def test_size(self):
assert np.size(self.ma) == 6
def test_ndim(self):
assert np.ndim(self.ma) == 2
class TestShapeManipulation(BasicTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (6, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
self.check(np.atleast_1d)
o, so = np.atleast_1d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1,)
def test_atleast_2d(self):
self.check(np.atleast_2d)
o, so = np.atleast_2d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1)
def test_atleast_3d(self):
self.check(np.atleast_3d)
o, so = np.atleast_3d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1, 1)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.mc)
assert o.shape == o.mask.shape == (2,)
assert_array_equal(o.unmasked, self.c.squeeze())
assert_array_equal(o.mask, self.mask_c.squeeze())
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
self.check(np.broadcast_to, (3, 2, 3))
self.check(np.broadcast_to, (3, 2, 3), subok=False)
def test_broadcast_arrays(self):
self.check2(np.broadcast_arrays)
self.check2(np.broadcast_arrays, subok=False)
class TestArgFunctions(MaskedArraySetup):
def check(self, function, *args, fill_value=np.nan, **kwargs):
o = function(self.ma, *args, **kwargs)
a_filled = self.ma.filled(fill_value=fill_value)
expected = function(a_filled, *args, **kwargs)
assert_array_equal(o, expected)
def test_argmin(self):
self.check(np.argmin, fill_value=np.inf)
def test_argmax(self):
self.check(np.argmax, fill_value=-np.inf)
def test_argsort(self):
self.check(np.argsort, fill_value=np.nan)
def test_lexsort(self):
self.check(np.lexsort, fill_value=np.nan)
def test_nonzero(self):
self.check(np.nonzero, fill_value=0.0)
@pytest.mark.filterwarnings("ignore:Calling nonzero on 0d arrays is deprecated")
def test_nonzero_0d(self):
res1 = Masked(1, mask=False).nonzero()
assert len(res1) == 1
assert_array_equal(res1[0], np.ones(()).nonzero()[0])
res2 = Masked(1, mask=True).nonzero()
assert len(res2) == 1
assert_array_equal(res2[0], np.zeros(()).nonzero()[0])
def test_argwhere(self):
self.check(np.argwhere, fill_value=0.0)
def test_argpartition(self):
self.check(np.argpartition, 2, fill_value=np.inf)
def test_flatnonzero(self):
self.check(np.flatnonzero, fill_value=0.0)
class TestAlongAxis(MaskedArraySetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
out = np.take_along_axis(self.ma, indices, axis=0)
expected = np.take_along_axis(self.a, indices, axis=0)
expected_mask = np.take_along_axis(self.mask_a, indices, axis=0)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_put_along_axis(self):
ma = self.ma.copy()
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
np.put_along_axis(ma, indices, axis=0, values=-1)
expected = self.a.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, self.mask_a)
np.put_along_axis(ma, indices, axis=0, values=np.ma.masked)
assert_array_equal(ma.unmasked, expected)
expected_mask = self.mask_a.copy()
np.put_along_axis(expected_mask, indices, axis=0, values=True)
assert_array_equal(ma.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.ma)
expected = np.apply_along_axis(np.square, axis, self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.parametrize("axes", [(1,), 0, (0, -1)])
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.mean(np.square(x), axis)
out = np.apply_over_axes(function, self.ma, axes)
expected = self.ma
for axis in axes if isinstance(axes, tuple) else (axes,):
expected = (expected**2).mean(axis, keepdims=True)
assert_array_equal(out.unmasked, expected.unmasked)
assert_array_equal(out.mask, expected.mask)
def test_apply_over_axes_no_reduction(self):
out = np.apply_over_axes(np.cumsum, self.ma, 0)
expected = self.ma.cumsum(axis=0)
assert_masked_equal(out, expected)
def test_apply_over_axes_wrong_size(self):
with pytest.raises(ValueError, match="not.*correct shape"):
np.apply_over_axes(lambda x, axis: x[..., np.newaxis], self.ma, 0)
class TestIndicesFrom(NoMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.arange(9).reshape(3, 3)
self.mask_a = np.eye(3, dtype=bool)
self.ma = Masked(self.a, self.mask_a)
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.array([1 + 2j, 3 + 4j])
self.mask_a = np.array([True, False])
self.ma = Masked(self.a, mask=self.mask_a)
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantMaskTestSetup):
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.ma)
assert_array_equal(copy, self.ma)
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.ma)
assert_array_equal(farray, self.ma)
class TestArrayCreation(MaskedArraySetup):
def test_empty_like(self):
o = np.empty_like(self.ma)
assert o.shape == (2, 3)
assert isinstance(o, Masked)
assert isinstance(o, np.ndarray)
o2 = np.empty_like(prototype=self.ma)
assert o2.shape == (2, 3)
assert isinstance(o2, Masked)
assert isinstance(o2, np.ndarray)
o3 = np.empty_like(self.ma, subok=False)
assert type(o3) is MaskedNDArray
def test_zeros_like(self):
o = np.zeros_like(self.ma)
assert_array_equal(o.unmasked, np.zeros_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.zeros_like(a=self.ma)
assert_array_equal(o2.unmasked, np.zeros_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
def test_ones_like(self):
o = np.ones_like(self.ma)
assert_array_equal(o.unmasked, np.ones_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.ones_like(a=self.ma)
assert_array_equal(o2.unmasked, np.ones_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
@pytest.mark.parametrize("value", [0.5, Masked(0.5, mask=True), np.ma.masked])
def test_full_like(self, value):
o = np.full_like(self.ma, value)
if value is np.ma.masked:
expected = Masked(o.unmasked, True)
else:
expected = Masked(np.empty_like(self.a))
expected[...] = value
assert_array_equal(o.unmasked, expected.unmasked)
assert_array_equal(o.mask, expected.mask)
class TestAccessingParts(BasicTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diag_1d_input(self):
ma = self.ma.ravel()
o = np.diag(ma)
assert_array_equal(o.unmasked, np.diag(self.a.ravel()))
assert_array_equal(o.mask, np.diag(self.mask_a.ravel()))
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False], self.ma, axis=0)
expected = np.compress([True, False], self.a, axis=0)
expected_mask = np.compress([True, False], self.mask_a, axis=0)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_extract(self):
o = np.extract([True, False, True], self.ma)
expected = np.extract([True, False, True], self.a)
expected_mask = np.extract([True, False, True], self.mask_a)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(MaskedArraySetup):
def test_put(self):
ma = self.ma.copy()
v = Masked([50, 150], [False, True])
np.put(ma, [0, 2], v)
expected = self.a.copy()
np.put(expected, [0, 2], [50, 150])
expected_mask = self.mask_a.copy()
np.put(expected_mask, [0, 2], [False, True])
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
# Indices cannot be masked.
np.put(ma, Masked([0, 2]), v)
with pytest.raises(TypeError):
# Array to put masked values in must be masked.
np.put(self.a.copy(), [0, 2], v)
def test_putmask(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(
np.arange(100, 650, 100), mask=[False, True, True, True, False, False]
)
np.putmask(ma, mask, values)
expected = self.a.flatten()
np.putmask(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.putmask(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.putmask(self.a.flatten(), mask, values)
def test_place(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked([100, 200], mask=[False, True])
np.place(ma, mask, values)
expected = self.a.flatten()
np.place(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.place(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.place(self.a.flatten(), mask, values)
def test_copyto(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(
np.arange(100, 650, 100), mask=[False, True, True, True, False, False]
)
np.copyto(ma, values, where=mask)
expected = self.a.flatten()
np.copyto(expected, values.unmasked, where=mask)
expected_mask = self.mask_a.flatten()
np.copyto(expected_mask, values.mask, where=mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.copyto(self.a.flatten(), values, where=mask)
@pytest.mark.parametrize("value", [0.25, np.ma.masked])
def test_fill_diagonal(self, value):
ma = self.ma[:2, :2].copy()
np.fill_diagonal(ma, value)
expected = ma.copy()
expected[np.diag_indices_from(expected)] = value
assert_array_equal(ma.unmasked, expected.unmasked)
assert_array_equal(ma.mask, expected.mask)
class TestRepeat(BasicTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(MaskedArraySetup):
# More tests at TestMaskedArrayConcatenation in test_functions.
def check(self, func, *args, **kwargs):
ma_list = kwargs.pop("ma_list", [self.ma, self.ma])
a_list = [Masked(ma).unmasked for ma in ma_list]
m_list = [Masked(ma).mask for ma in ma_list]
o = func(ma_list, *args, **kwargs)
expected = func(a_list, *args, **kwargs)
expected_mask = func(m_list, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
self.check(np.concatenate, ma_list=[self.a, self.ma])
self.check(np.concatenate, dtype="f4")
out = Masked(np.empty((4, 3)))
result = np.concatenate([self.ma, self.ma], out=out)
assert out is result
expected = np.concatenate([self.a, self.a])
expected_mask = np.concatenate([self.mask_a, self.mask_a])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.concatenate([self.ma, self.ma], out=np.empty((4, 3)))
def test_stack(self):
self.check(np.stack)
def test_column_stack(self):
self.check(np.column_stack)
def test_hstack(self):
self.check(np.hstack)
def test_vstack(self):
self.check(np.vstack)
def test_dstack(self):
self.check(np.dstack)
def test_block(self):
self.check(np.block)
out = np.block([[0.0, Masked(1.0, True)], [Masked(1, False), Masked(2, False)]])
expected = np.array([[0, 1.0], [1, 2]])
expected_mask = np.array([[False, True], [False, False]])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_append(self):
out = np.append(self.ma, self.mc, axis=1)
expected = np.append(self.a, self.c, axis=1)
expected_mask = np.append(self.mask_a, self.mask_c, axis=1)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_insert(self):
obj = (1, 1)
values = Masked([50.0, 25.0], mask=[True, False])
out = np.insert(self.ma.flatten(), obj, values)
expected = np.insert(self.a.flatten(), obj, [50.0, 25.0])
expected_mask = np.insert(self.mask_a.flatten(), obj, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.insert(self.a.flatten(), obj, values)
with pytest.raises(TypeError):
np.insert(self.ma.flatten(), Masked(obj), values)
class TestSplit:
@classmethod
def setup_class(self):
self.a = np.arange(54.0).reshape(3, 3, 6)
self.mask_a = np.zeros(self.a.shape, dtype=bool)
self.mask_a[1, 1, 1] = True
self.mask_a[0, 1, 4] = True
self.mask_a[1, 2, 5] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
expected_mask = func(self.mask_a, *args, **kwargs)
assert len(out) == len(expected)
for o, x, xm in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, xm)
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestMethodLikes(MaskedArraySetup):
def check(self, function, *args, method=None, **kwargs):
if method is None:
method = function.__name__
o = function(self.ma, *args, **kwargs)
x = getattr(self.ma, method)(*args, **kwargs)
assert_masked_equal(o, x)
def test_amax(self):
self.check(np.amax, method="max")
def test_amin(self):
self.check(np.amin, method="min")
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
self.check(np.any)
def test_all(self):
self.check(np.all)
def test_sometrue(self):
self.check(np.sometrue, method="any")
def test_alltrue(self):
self.check(np.alltrue, method="all")
def test_prod(self):
self.check(np.prod)
def test_product(self):
self.check(np.product, method="prod")
def test_cumprod(self):
self.check(np.cumprod)
def test_cumproduct(self):
self.check(np.cumproduct, method="cumprod")
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_, method="round")
def test_around(self):
self.check(np.around, method="round")
def test_clip(self):
self.check(np.clip, 2.0, 4.0)
self.check(np.clip, self.mb, self.mc)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
self.check(np.var)
class TestUfuncLike(InvariantMaskTestSetup):
def test_fix(self):
self.check(np.fix)
def test_angle(self):
a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.angle(ma)
expected = np.angle(ma.unmasked)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_i0(self):
self.check(np.i0)
def test_sinc(self):
self.check(np.sinc)
def test_where(self):
mask = [True, False, True]
out = np.where(mask, self.ma, 1000.0)
expected = np.where(mask, self.a, 1000.0)
expected_mask = np.where(mask, self.mask_a, False)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
mask2 = Masked(mask, [True, False, False])
out2 = np.where(mask2, self.ma, 1000.0)
expected2 = np.where(mask, self.a, 1000.0)
expected_mask2 = np.where(mask, self.mask_a, False) | mask2.mask
assert_array_equal(out2.unmasked, expected2)
assert_array_equal(out2.mask, expected_mask2)
def test_where_single_arg(self):
m = Masked(np.arange(3), mask=[True, False, False])
out = np.where(m)
expected = m.nonzero()
assert isinstance(out, tuple) and len(out) == 1
assert_array_equal(out[0], expected[0])
def test_where_wrong_number_of_arg(self):
with pytest.raises(ValueError, match="either both or neither"):
np.where([True, False, False], self.a)
def test_choose(self):
a = np.array([0, 1]).reshape((2, 1))
result = np.choose(a, (self.ma, self.mb))
expected = np.choose(a, (self.a, self.b))
expected_mask = np.choose(a, (self.mask_a, self.mask_b))
assert_array_equal(result.unmasked, expected)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.choose(a, (self.ma, self.mb), out=out)
assert result2 is out
assert_array_equal(result2, result)
with pytest.raises(TypeError):
np.choose(a, (self.ma, self.mb), out=np.zeros_like(expected))
def test_choose_masked(self):
ma = Masked(np.array([-1, 1]), mask=[True, False]).reshape((2, 1))
out = ma.choose((self.ma, self.mb))
expected = np.choose(ma.filled(0), (self.a, self.b))
expected_mask = np.choose(ma.filled(0), (self.mask_a, self.mask_b)) | ma.mask
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(ValueError):
ma.unmasked.choose((self.ma, self.mb))
@pytest.mark.parametrize("default", [-1.0, np.ma.masked, Masked(-1, mask=True)])
def test_select(self, default):
a, mask_a, ma = self.a, self.mask_a, self.ma
out = np.select([a < 1.5, a > 3.5], [ma, ma + 1], default=default)
expected = np.select(
[a < 1.5, a > 3.5],
[a, a + 1],
default=-1 if default is not np.ma.masked else 0,
)
expected_mask = np.select(
[a < 1.5, a > 3.5],
[mask_a, mask_a],
default=getattr(default, "mask", False),
)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_real_if_close(self):
a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.real_if_close(ma)
expected = np.real_if_close(a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_tril(self):
self.check(np.tril)
def test_triu(self):
self.check(np.triu)
def test_unwrap(self):
self.check(np.unwrap)
def test_nan_to_num(self):
self.check(np.nan_to_num)
ma = Masked([np.nan, 1.0], mask=[True, False])
o = np.nan_to_num(ma, copy=False)
assert_masked_equal(o, Masked([0.0, 1.0], mask=[True, False]))
assert ma is o
class TestUfuncLikeTests:
@classmethod
def setup_class(self):
self.a = np.array([[-np.inf, +np.inf, np.nan, 3.0, 4.0]] * 2)
self.mask_a = np.array([[False] * 5, [True] * 4 + [False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([[3.0001], [3.9999]])
self.mask_b = np.array([[True], [False]])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, func):
out = func(self.ma)
expected = func(self.a)
assert type(out) is MaskedNDArray
assert out.dtype.kind == "b"
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
assert not np.may_share_memory(out.mask, self.mask_a)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
o = np.isreal(Masked([1.0 + 1j], mask=False))
assert not o.unmasked and not o.mask
o = np.isreal(Masked([1.0 + 1j], mask=True))
assert not o.unmasked and o.mask
def test_iscomplex(self):
self.check(np.iscomplex)
o = np.iscomplex(Masked([1.0 + 1j], mask=False))
assert o.unmasked and not o.mask
o = np.iscomplex(Masked([1.0 + 1j], mask=True))
assert o.unmasked and o.mask
def test_isclose(self):
out = np.isclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb, atol=0.01)
expected_mask = self.mask_a | self.mask_b
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_allclose(self):
out = np.allclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb, atol=0.01)[
self.mask_a | self.mask_b
].all()
assert_array_equal(out, expected)
def test_array_equal(self):
assert not np.array_equal(self.ma, self.ma)
assert not np.array_equal(self.ma, self.a)
assert np.array_equal(self.ma, self.ma, equal_nan=True)
assert np.array_equal(self.ma, self.a, equal_nan=True)
assert not np.array_equal(self.ma, self.mb)
ma2 = self.ma.copy()
ma2.mask |= np.isnan(self.a)
assert np.array_equal(ma2, self.ma)
def test_array_equiv(self):
assert np.array_equiv(self.mb, self.mb)
assert np.array_equiv(self.mb, self.b)
assert not np.array_equiv(self.ma, self.mb)
assert np.array_equiv(self.mb, np.stack([self.mb, self.mb]))
class TestOuterLikeFunctions(MaskedArraySetup):
def test_outer(self):
result = np.outer(self.ma, self.mb)
expected_data = np.outer(self.a.ravel(), self.b.ravel())
expected_mask = np.logical_or.outer(self.mask_a.ravel(), self.mask_b.ravel())
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.outer(self.ma, self.mb, out=out)
assert result2 is out
assert result2 is not result
assert_masked_equal(result2, result)
out2 = np.zeros_like(result.unmasked)
with pytest.raises(TypeError):
np.outer(self.ma, self.mb, out=out2)
def test_kron(self):
result = np.kron(self.ma, self.mb)
expected_data = np.kron(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a, self.mask_b).reshape(
result.shape
)
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
class TestReductionLikeFunctions(MaskedArraySetup):
def test_average(self):
o = np.average(self.ma)
assert_masked_equal(o, self.ma.mean())
o = np.average(self.ma, weights=self.mb, axis=-1)
expected = np.average(self.a, weights=self.b, axis=-1)
expected_mask = (self.mask_a | self.mask_b).any(-1)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_trace(self):
o = np.trace(self.ma)
expected = np.trace(self.a)
expected_mask = np.trace(self.mask_a).astype(bool)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_count_nonzero(self, axis):
o = np.count_nonzero(self.ma, axis=axis)
expected = np.count_nonzero(self.ma.filled(0), axis=axis)
assert_array_equal(o, expected)
@pytest.mark.filterwarnings("ignore:all-nan")
class TestPartitionLikeFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(36.0).reshape(6, 6)
self.mask_a = np.zeros_like(self.a, bool)
        # On purpose mask the lower triangle (including the diagonal), so
        # that some rows/columns are fully masked.
self.mask_a[np.tril_indices_from(self.a)] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, function, *args, **kwargs):
o = function(self.ma, *args, **kwargs)
nanfunc = getattr(np, "nan" + function.__name__)
nanfilled = self.ma.filled(np.nan)
expected = nanfunc(nanfilled, *args, **kwargs)
assert_array_equal(o.filled(np.nan), expected)
assert_array_equal(o.mask, np.isnan(expected))
if not kwargs.get("axis", 1):
# no need to test for all
return
out = np.zeros_like(o)
o2 = function(self.ma, *args, out=out, **kwargs)
assert o2 is out
assert_masked_equal(o2, o)
with pytest.raises(TypeError):
function(self.ma, *args, out=np.zeros_like(expected), **kwargs)
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_median(self, axis):
self.check(np.median, axis=axis)
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_quantile(self, axis):
self.check(np.quantile, q=[0.25, 0.5], axis=axis)
def test_quantile_out_of_range(self):
with pytest.raises(ValueError, match="must be in the range"):
np.quantile(self.ma, q=1.5)
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_percentile(self, axis):
self.check(np.percentile, q=50, axis=axis)
class TestIntDiffFunctions(MaskedArraySetup):
def test_diff(self):
out = np.diff(self.ma)
expected = np.diff(self.a)
expected_mask = self.mask_a[:, 1:] | self.mask_a[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_diff_prepend_append(self):
out = np.diff(self.ma, prepend=Masked(-1, mask=True), append=1)
expected = np.diff(self.a, prepend=-1, append=1.0)
mask = np.concatenate(
[np.ones((2, 1), bool), self.mask_a, np.zeros((2, 1), bool)], axis=-1
)
expected_mask = mask[:, 1:] | mask[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_trapz(self):
ma = self.ma.copy()
ma.mask[1] = False
out = np.trapz(ma)
assert_array_equal(out.unmasked, np.trapz(self.a))
assert_array_equal(out.mask, np.array([True, False]))
def test_gradient(self):
out = np.gradient(self.ma)
expected = np.gradient(self.a)
expected_mask = [
(self.mask_a[1:] | self.mask_a[:-1]).repeat(2, axis=0),
np.stack(
[
self.mask_a[:, 0] | self.mask_a[:, 1],
self.mask_a[:, 0] | self.mask_a[:, 2],
self.mask_a[:, 1] | self.mask_a[:, 2],
],
axis=-1,
),
]
for o, x, m in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestSpaceFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(1.0, 7.0).reshape(2, 3)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([2.5, 10.0, 3.0])
self.mask_b = np.array([False, True, False])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, *args, **kwargs):
out = function(self.ma, self.mb, 5)
expected = function(self.a, self.b, 5)
expected_mask = np.broadcast_to(
self.mask_a | self.mask_b, expected.shape
).copy()
# TODO: make implementation that also ensures start point mask is
# determined just by start point? (as for geomspace in numpy 1.20)?
expected_mask[-1] = self.mask_b
if function is np.geomspace:
expected_mask[0] = self.mask_a
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_linspace(self):
self.check(np.linspace, 5)
def test_logspace(self):
self.check(np.logspace, 10)
def test_geomspace(self):
self.check(np.geomspace, 5)
class TestInterpolationFunctions(MaskedArraySetup):
def test_interp(self):
xp = np.arange(5.0)
fp = np.array([1.0, 5.0, 6.0, 19.0, 20.0])
mask_fp = np.array([False, False, False, True, False])
mfp = Masked(fp, mask=mask_fp)
x = np.array([1.5, 17.0])
mask_x = np.array([False, True])
mx = Masked(x, mask=mask_x)
out = np.interp(mx, xp, mfp)
expected = np.interp(x, xp[~mask_fp], fp[~mask_fp])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_x)
def test_piecewise(self):
condlist = [self.a < 1, self.a >= 1]
out = np.piecewise(self.ma, condlist, [Masked(-1, mask=True), 1.0])
expected = np.piecewise(self.a, condlist, [-1, 1.0])
expected_mask = np.piecewise(self.mask_a, condlist, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
condlist2 = [self.a < 1, self.a >= 3]
out2 = np.piecewise(
self.ma,
condlist2,
[Masked(-1, True), 1, lambda x: Masked(np.full_like(x, 2.0), mask=~x.mask)],
)
expected = np.piecewise(self.a, condlist2, [-1, 1, 2])
expected_mask = np.piecewise(
self.mask_a, condlist2, [True, False, lambda x: ~x]
)
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
with pytest.raises(ValueError, match="with 2 condition"):
np.piecewise(self.ma, condlist2, [])
def test_regression_12978(self):
"""Regression tests for https://github.com/astropy/astropy/pull/12978"""
# This case produced incorrect results
mask = [False, True, False]
x = np.array([1, 2, 3])
xp = Masked(np.array([1, 2, 3]), mask=mask)
fp = Masked(np.array([1, 2, 3]), mask=mask)
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
# This case raised a ValueError
xp = np.array([1, 3])
fp = Masked(np.array([1, 3]))
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
class TestBincount(MaskedArraySetup):
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
mask_i = np.array([True, False, False, True, False, False])
mi = Masked(i, mask=mask_i)
out = np.bincount(mi)
expected = np.bincount(i[~mask_i])
assert_array_equal(out, expected)
w = np.arange(len(i))
mask_w = np.array([True] + [False] * 5)
mw = Masked(w, mask=mask_w)
out2 = np.bincount(i, mw)
expected = np.bincount(i, w)
expected_mask = np.array([False, True, False, False, False])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
out3 = np.bincount(mi, mw)
expected = np.bincount(i[~mask_i], w[~mask_i])
expected_mask = np.array([False, False, False, False, False])
assert_array_equal(out3.unmasked, expected)
assert_array_equal(out3.mask, expected_mask)
class TestSortFunctions(MaskedArraySetup):
def test_sort(self):
o = np.sort(self.ma)
expected = self.ma.copy()
expected.sort()
assert_masked_equal(o, expected)
def test_sort_complex(self):
ma = Masked(
np.array([1 + 2j, 0 + 4j, 3 + 0j, -1 - 1j]),
mask=[True, False, False, False],
)
o = np.sort_complex(ma)
indx = np.lexsort((ma.unmasked.imag, ma.unmasked.real, ma.mask))
expected = ma[indx]
assert_masked_equal(o, expected)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
o = np.msort(self.ma)
expected = np.sort(self.ma, axis=0)
assert_masked_equal(o, expected)
def test_partition(self):
o = np.partition(self.ma, 1)
expected = self.ma.copy()
expected.partition(1)
assert_masked_equal(o, expected)
class TestStringFunctions:
# More elaborate tests done in test_masked.py
@classmethod
def setup_class(self):
self.ma = Masked(np.arange(3), mask=[True, False, False])
def test_array2string(self):
out0 = np.array2string(self.ma)
assert out0 == "[— 1 2]"
# Arguments are interpreted as usual.
out1 = np.array2string(self.ma, separator=", ")
assert out1 == "[—, 1, 2]"
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.ma, separator=", ", formatter={"all": hex})
assert out2 == "[———, 0x1, 0x2]"
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.ma, None, None, None, ", ", "", np._NoValue, {"int": hex}
)
assert out3 == out2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.ma, separator=", ", formatter={"float": hex})
assert out4 == out1
def test_array_repr(self):
out = np.array_repr(self.ma)
assert out == "MaskedNDArray([—, 1, 2])"
ma2 = self.ma.astype("f4")
out2 = np.array_repr(ma2)
assert out2 == "MaskedNDArray([——, 1., 2.], dtype=float32)"
def test_array_str(self):
out = np.array_str(self.ma)
assert out == "[— 1 2]"
class TestBitFunctions:
@classmethod
def setup_class(self):
self.a = np.array([15, 255, 0], dtype="u1")
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.unpackbits(self.a).reshape(6, 4)
self.mask_b = np.array([False] * 15 + [True, True] + [False] * 7).reshape(6, 4)
self.mb = Masked(self.b, mask=self.mask_b)
@pytest.mark.parametrize("axis", [None, 1, 0])
def test_packbits(self, axis):
out = np.packbits(self.mb, axis=axis)
if axis is None:
expected = self.a
else:
expected = np.packbits(self.b, axis=axis)
expected_mask = np.packbits(self.mask_b, axis=axis) > 0
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_unpackbits(self):
out = np.unpackbits(self.ma)
mask = np.where(self.mask_a, np.uint8(255), np.uint8(0))
expected_mask = np.unpackbits(mask) > 0
assert_array_equal(out.unmasked, self.b.ravel())
assert_array_equal(out.mask, expected_mask)
class TestIndexFunctions(MaskedArraySetup):
"""Does not seem much sense to support these..."""
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.ma, 3)
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.ma,), 3)
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.ma)
class TestDtypeFunctions(MaskedArraySetup):
def check(self, function, *args, **kwargs):
out = function(self.ma, *args, **kwargs)
expected = function(self.a, *args, **kwargs)
assert out == expected
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.a.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.ma[0, 0])
expected = np.min_scalar_type(self.a[0, 0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(MaskedArraySetup):
def test_meshgrid(self):
a = np.arange(1.0, 4.0)
mask_a = np.array([True, False, False])
ma = Masked(a, mask=mask_a)
b = np.array([2.5, 10.0, 3.0, 4.0])
mask_b = np.array([False, True, False, True])
mb = Masked(b, mask=mask_b)
oa, ob = np.meshgrid(ma, mb)
xa, xb = np.broadcast_arrays(a, b[:, np.newaxis])
ma, mb = np.broadcast_arrays(mask_a, mask_b[:, np.newaxis])
for o, x, m in ((oa, xa, ma), (ob, xb, mb)):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestMemoryFunctions(MaskedArraySetup):
def test_shares_memory(self):
assert np.shares_memory(self.ma, self.ma.unmasked)
assert not np.shares_memory(self.ma, self.ma.mask)
def test_may_share_memory(self):
assert np.may_share_memory(self.ma, self.ma.unmasked)
assert not np.may_share_memory(self.ma, self.ma.mask)
class TestDatetimeFunctions:
# Could in principle support np.is_busday, np.busday_count, np.busday_offset.
@classmethod
def setup_class(self):
self.a = np.array(["2020-12-31", "2021-01-01", "2021-01-02"], dtype="M")
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([["2021-01-07"], ["2021-01-31"]], dtype="M")
self.mask_b = np.array([[False], [True]])
self.mb = Masked(self.b, mask=self.mask_b)
def test_datetime_as_string(self):
out = np.datetime_as_string(self.ma)
expected = np.datetime_as_string(self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.filterwarnings("ignore:all-nan")
class TestNaNFunctions:
def setup_class(self):
self.a = np.array(
[
[np.nan, np.nan, 3.0],
[4.0, 5.0, 6.0],
]
)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.b = np.arange(1, 7).reshape(2, 3)
self.mask_b = self.mask_a
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, exact_fill_value=None, masked_result=True, **kwargs):
result = function(self.ma, **kwargs)
expected_data = function(self.ma.filled(np.nan), **kwargs)
expected_mask = np.isnan(expected_data)
if masked_result:
assert isinstance(result, Masked)
assert_array_equal(result.mask, expected_mask)
assert np.all(result == expected_data)
else:
assert not isinstance(result, Masked)
assert_array_equal(result, expected_data)
assert not np.any(expected_mask)
out = np.zeros_like(result)
result2 = function(self.ma, out=out, **kwargs)
assert result2 is out
assert_array_equal(result2, result)
def check_arg(self, function, **kwargs):
# arg functions do not have an 'out' argument, so just test directly.
result = function(self.ma, **kwargs)
assert not isinstance(result, Masked)
expected = function(self.ma.filled(np.nan), **kwargs)
assert_array_equal(result, expected)
def test_nanmin(self):
self.check(np.nanmin)
self.check(np.nanmin, axis=0)
self.check(np.nanmin, axis=1)
resi = np.nanmin(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.array([2, 4]))
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanmax(self):
self.check(np.nanmax)
def test_nanargmin(self):
self.check_arg(np.nanargmin)
self.check_arg(np.nanargmin, axis=1)
def test_nanargmax(self):
self.check_arg(np.nanargmax)
def test_nansum(self):
self.check(np.nansum, masked_result=False)
resi = np.nansum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([5, 10]))
def test_nanprod(self):
self.check(np.nanprod, masked_result=False)
resi = np.nanprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([6, 24]))
def test_nancumsum(self):
self.check(np.nancumsum, masked_result=False)
resi = np.nancumsum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[0, 2, 5], [4, 4, 10]]))
def test_nancumprod(self):
self.check(np.nancumprod, masked_result=False)
resi = np.nancumprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[1, 2, 6], [4, 4, 24]]))
def test_nanmean(self):
self.check(np.nanmean)
resi = np.nanmean(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.mean(self.mb, axis=1).unmasked)
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanvar(self):
self.check(np.nanvar)
self.check(np.nanvar, ddof=1)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nanquantile(self):
self.check(np.nanquantile, q=0.5)
def test_nanpercentile(self):
self.check(np.nanpercentile, q=50)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
# Get covered functions
tested_functions = set()
for cov_cls in list(filter(inspect.isclass, locals().values())):
for k, v in cov_cls.__dict__.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
tested_functions.add(all_wrapped_functions[f])
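# For example, a method named ``test_nanmin`` above maps to the key 'nanmin',
# so ``all_wrapped_functions['nanmin']`` (i.e. ``np.nanmin``) is recorded as
# covered; any wrapped function without a correspondingly named test is then
# flagged by the completeness checks below.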
def test_basic_testing_completeness():
assert all_wrapped == (tested_functions | IGNORED_FUNCTIONS | UNSUPPORTED_FUNCTIONS)
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_testing_completeness():
assert not tested_functions.intersection(untested_functions)
assert all_wrapped == (tested_functions | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(APPLY_TO_BOTH_FUNCTIONS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
def test_all_included(self):
included_in_helpers = (
MASKED_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(APPLY_TO_BOTH_FUNCTIONS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS == untested_functions
|
c7ecac58bf9dd381c3a11f523785db6fd6fc508e18048f762d34f65c8d85a44f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
from .axislabels import AxisLabels
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .frame import EllipticalFrame, RectangularFrame1D
from .grid_paths import get_gridline_path, get_lon_lat_path
from .ticklabels import TickLabels
from .ticks import Ticks
__all__ = ["CoordinateHelper"]
# Matplotlib's gridlines use Line2D, but ours use PathPatch.
# Patches take a slightly different format of linestyle argument.
LINES_TO_PATCHES_LINESTYLE = {
"-": "solid",
"--": "dashed",
"-.": "dashdot",
":": "dotted",
"none": "none",
"None": "none",
" ": "none",
"": "none",
}
def wrap_angle_at(values, coord_wrap):
# On ARM processors, np.mod emits warnings if there are NaN values in the
# array, although this doesn't seem to happen on other processors.
with np.errstate(invalid="ignore"):
return np.mod(values - coord_wrap, 360.0) - (360.0 - coord_wrap)
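# Illustrative sketch (comment only, not executed): wrapped values fall in the
# half-open interval [coord_wrap - 360, coord_wrap).  For example,
#   wrap_angle_at(np.array([350.0, 10.0]), 180.0)  ->  array([-10.,  10.])
#   wrap_angle_at(np.array([-30.0, 10.0]), 360.0)  ->  array([330.,  10.])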
class CoordinateHelper:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
coord_wrap : float
The angle at which the longitude wraps (defaults to 360)
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
def __init__(
self,
parent_axes=None,
parent_map=None,
transform=None,
coord_index=None,
coord_type="scalar",
coord_unit=None,
coord_wrap=None,
frame=None,
format_unit=None,
default_label=None,
):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.parent_map = parent_map
self.transform = transform
self.coord_index = coord_index
self.coord_unit = coord_unit
self._format_unit = format_unit
self.frame = frame
self.default_label = default_label or ""
self._auto_axislabel = True
# Disable auto label for elliptical frames as it puts labels in
# annoying places.
if issubclass(self.parent_axes.frame_class, EllipticalFrame):
self._auto_axislabel = False
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(
self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure(),
)
self.ticks.display_minor_ticks(rcParams["xtick.minor.visible"])
self.minor_frequency = 5
# Initialize axis labels
self.axislabels = AxisLabels(
self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure(),
)
# Initialize container for the grid lines
self.grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
self.grid_lines_kwargs = {
"visible": False,
"facecolor": "none",
"edgecolor": rcParams["grid.color"],
"linestyle": LINES_TO_PATCHES_LINESTYLE[rcParams["grid.linestyle"]],
"linewidth": rcParams["grid.linewidth"],
"alpha": rcParams["grid.alpha"],
"transform": self.parent_axes.transData,
}
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
            Whether to draw the grid by computing the grid lines in
            world coordinates and transforming them to pixel coordinates
            (``'lines'``), or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
if grid_type == "lines" and not self.transform.has_inverse:
raise ValueError(
"The specified transform has no inverse, so the "
"grid cannot be drawn using grid_type='lines'"
)
if grid_type is None:
grid_type = "lines" if self.transform.has_inverse else "contours"
if grid_type in ("lines", "contours"):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if "color" in kwargs:
kwargs["edgecolor"] = kwargs.pop("color")
self.grid_lines_kwargs.update(kwargs)
if self.grid_lines_kwargs["visible"]:
if not draw_grid:
self.grid_lines_kwargs["visible"] = False
else:
self.grid_lines_kwargs["visible"] = True
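    # Illustrative usage (a sketch, assuming ``ax`` is a WCSAxes instance):
    #     ax.coords[0].grid(color='red', alpha=0.5, grid_type='contours')
    # Because the grid lines are drawn as PathPatch objects rather than
    # Line2D, a ``color`` keyword is translated to ``edgecolor`` above.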
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : float, optional
The value to wrap at for angular coordinates
"""
self.coord_type = coord_type
if coord_type == "longitude" and coord_wrap is None:
self.coord_wrap = 360
elif coord_type != "longitude" and coord_wrap is not None:
raise NotImplementedError(
"coord_wrap is not yet supported for non-longitude coordinates"
)
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == "scalar":
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ["longitude", "latitude"]:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(
unit=self.coord_unit, format_unit=self._format_unit
)
else:
raise ValueError(
"coord_type should be one of 'scalar', 'longitude', or 'latitude'"
)
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or `~matplotlib.ticker.Formatter`
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter instance")
def format_coord(self, value, format="auto"):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
The format to use - by default the formatting will be adjusted
            depending on whether Matplotlib is using LaTeX or MathText. To
get plain ASCII strings, use format='ascii'.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == "longitude":
value = wrap_angle_at(value, self.coord_wrap)
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = fl.formatter(values=[value] * fl._unit, spacing=spacing, format=format)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
separator : str or tuple or None
The separator between numbers in sexagesimal representation. Can be
either a string or a tuple (or `None` for default).
"""
if not (self._formatter_locator.__class__ == AngleFormatterLocator):
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, (str, tuple)) or separator is None:
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string, a tuple, or None")
def set_format_unit(self, unit, decimal=None, show_decimal_unit=True):
"""
Set the unit for the major tick labels.
Parameters
----------
        unit : :class:`~astropy.units.Unit`
            The unit to which the tick labels should be converted.
decimal : bool, optional
Whether to use decimal formatting. By default this is `False`
for degrees or hours (which therefore use sexagesimal formatting)
and `True` for all other units.
show_decimal_unit : bool, optional
Whether to include units when in decimal mode.
"""
self._formatter_locator.format_unit = u.Unit(unit)
self._formatter_locator.decimal = decimal
self._formatter_locator.show_decimal_unit = show_decimal_unit
def get_format_unit(self):
"""
Get the unit for the major tick labels.
"""
return self._formatter_locator.format_unit
def set_ticks(
self,
values=None,
spacing=None,
number=None,
size=None,
width=None,
color=None,
alpha=None,
direction=None,
exclude_overlapping=None,
):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points
color : str or tuple, optional
A valid Matplotlib color for the ticks
alpha : float, optional
The alpha value (transparency) for the ticks.
direction : {'in','out'}, optional
Whether the ticks should point inwards or outwards.
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError(
"At most one of values, spacing, or number should be specified"
)
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if width is not None:
self.ticks.set_linewidth(width)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
if direction is not None:
if direction in ("in", "out"):
self.ticks.set_tick_out(direction == "out")
else:
raise ValueError("direction should be 'in' or 'out'")
if exclude_overlapping is not None:
warnings.warn(
"exclude_overlapping= should be passed to "
"set_ticklabel instead of set_ticks",
AstropyDeprecationWarning,
)
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
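    # Illustrative usage (a sketch, assuming ``ax`` is a WCSAxes instance):
    #     ax.coords['glon'].set_ticks(spacing=1 * u.arcmin, color='red')
    # Only one of ``values``, ``spacing`` or ``number`` may be given, since
    # they are alternative ways of specifying the same tick locations.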
def set_ticks_position(self, position):
"""
Set where ticks should appear
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
Should be a string containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be
shown on the left and bottom axis.
"""
self.ticks.set_visible_axes(position)
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide ticks
along this coordinate.
"""
self.ticks.set_visible(visible)
def set_ticklabel(
self, color=None, size=None, pad=None, exclude_overlapping=None, **kwargs
):
"""
Set the visual properties for the tick labels.
Parameters
----------
size : float, optional
            The size of the tick labels in points
color : str or tuple, optional
A valid Matplotlib color for the tick labels
pad : float, optional
Distance in points between tick and label.
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap over each other.
**kwargs
Other keyword arguments are passed to :class:`matplotlib.text.Text`.
"""
if size is not None:
self.ticklabels.set_size(size)
if color is not None:
self.ticklabels.set_color(color)
if pad is not None:
self.ticklabels.set_pad(pad)
if exclude_overlapping is not None:
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
tick labels to be shown on the left and bottom axis.
"""
self.ticklabels.set_visible_axes(position)
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
visible : bool
            The visibility of tick labels. Setting as ``False`` will hide this
            coordinate's tick labels.
"""
self.ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
**kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
fontdict = kwargs.pop("fontdict", None)
# NOTE: When using plt.xlabel/plt.ylabel, minpad can get set explicitly
# to None so we need to make sure that in that case we change to a
# default numerical value.
if minpad is None:
minpad = 1
self.axislabels.set_text(text)
self.axislabels.set_minpad(minpad)
self.axislabels.set(**kwargs)
if fontdict is not None:
self.axislabels.update(fontdict)
def get_axislabel(self):
"""
Get the text for the axis label
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_auto_axislabel(self, auto_label):
"""
        Set whether default axis labels are rendered if no explicit label
        is provided.
Parameters
----------
auto_label : `bool`
`True` if default labels will be rendered.
"""
self._auto_axislabel = bool(auto_label)
def get_auto_axislabel(self):
"""
        Return whether default axis labels will be rendered if no explicit
        label is provided.
Returns
-------
auto_axislabel : `bool`
`True` if default labels will be rendered.
"""
return self._auto_axislabel
def _get_default_axislabel(self):
unit = self.get_format_unit() or self.coord_unit
if not unit or unit is u.one or self.coord_type in ("longitude", "latitude"):
return f"{self.default_label}"
else:
return f"{self.default_label} [{unit:latex}]"
def set_axislabel_position(self, position):
"""
Set where axis labels should appear
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
axis label to be shown on the left and bottom axis.
"""
self.axislabels.set_visible_axes(position)
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self.axislabels.set_visibility_rule(rule)
def get_axislabel_visibility_rule(self, rule):
"""
Get the rule used to determine when the axis label is drawn.
"""
return self.axislabels.get_visibility_rule()
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw_grid(self, renderer):
renderer.open_group("grid lines")
self._update_ticks()
if self.grid_lines_kwargs["visible"]:
if isinstance(self.frame, RectangularFrame1D):
self._update_grid_lines_1d()
else:
if self._grid_type == "lines":
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == "lines":
frame_patch = self.frame.patch
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(frame_patch)
p.draw(renderer)
elif self._grid is not None:
for line in self._grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group("grid lines")
def _draw_ticks(self, renderer, bboxes, ticklabels_bbox):
"""
Draw all ticks and ticklabels.
"""
renderer.open_group("ticks")
self.ticks.draw(renderer)
self.ticklabels.draw(
renderer,
bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
tick_out_size=self.ticks.out_size,
)
renderer.close_group("ticks")
def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, visible_ticks):
# Render the default axis label if no axis label is set.
if self._auto_axislabel and not self.get_axislabel():
self.set_axislabel(self._get_default_axislabel())
renderer.open_group("axis labels")
self.axislabels.draw(
renderer,
bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
coord_ticklabels_bbox=ticklabels_bbox[self],
ticks_locs=self.ticks.ticks_locs,
visible_ticks=visible_ticks,
)
renderer.close_group("axis labels")
def _update_ticks(self):
if self.coord_index is None:
return
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_map.get_coord_range()
# First find the ticks we want to show
tick_world_coordinates, self._fl_spacing = self.locator(
*coord_range[self.coord_index]
)
if self.ticks.get_display_minor_ticks():
minor_ticks_w_coordinates = self._formatter_locator.minor_locator(
self._fl_spacing,
self.get_minor_frequency(),
*coord_range[self.coord_index],
)
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
from . import conf
frame = self.frame.sample(conf.frame_boundary_samples)
self.ticks.clear()
self.ticklabels.clear()
self.lblinfo = []
self.lbl_world = []
# Look up parent axes' transform from data to figure coordinates.
#
# See:
# https://matplotlib.org/stable/tutorials/advanced/transforms_tutorial.html#the-transformation-pipeline
transData = self.parent_axes.transData
invertedTransLimits = transData.inverted()
for axis, spine in frame.items():
if spine.data.size == 0:
continue
if not isinstance(self.frame, RectangularFrame1D):
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
pixel0 = spine.data
world0 = spine.world[:, self.coord_index]
with np.errstate(invalid="ignore"):
world0 = self.transform.transform(pixel0)[:, self.coord_index]
axes0 = transData.transform(pixel0)
# Advance 2 pixels in figure coordinates
pixel1 = axes0.copy()
pixel1[:, 0] += 2.0
pixel1 = invertedTransLimits.transform(pixel1)
with np.errstate(invalid="ignore"):
world1 = self.transform.transform(pixel1)[:, self.coord_index]
# Advance 2 pixels in figure coordinates
pixel2 = axes0.copy()
pixel2[:, 1] += 2.0 if self.frame.origin == "lower" else -2.0
pixel2 = invertedTransLimits.transform(pixel2)
with np.errstate(invalid="ignore"):
world2 = self.transform.transform(pixel2)[:, self.coord_index]
dx = world1 - world0
dy = world2 - world0
# Rotate by 90 degrees
dx, dy = -dy, dx
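                # (dx, dy) is the local gradient of this coordinate in display
                # space; rotating it by 90 degrees gives a direction tangent to
                # the line of constant coordinate value, so the tick is aligned
                # with this coordinate's grid line where it crosses the frame.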
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
dx *= self._coord_scale_to_deg
dy *= self._coord_scale_to_deg
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.0)
dy = wrap_angle_at(dy, 180.0)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack(
[spine.normal_angle, spine.normal_angle[-1]]
)
with np.errstate(invalid="ignore"):
reset = ((normal_angle_full - tick_angle) % 360 > 90.0) & (
(tick_angle - normal_angle_full) % 360 > 90.0
)
tick_angle[reset] -= 180.0
else:
rotation = 90 if axis == "b" else -90
tick_angle = np.zeros((conf.frame_boundary_samples,)) + rotation
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
w1 = w1 * self._coord_scale_to_deg
w2 = w2 * self._coord_scale_to_deg
w1 = wrap_angle_at(w1, self.coord_wrap)
w2 = wrap_angle_at(w2, self.coord_wrap)
with np.errstate(invalid="ignore"):
w1[w2 - w1 > 180.0] += 360
w2[w1 - w2 > 180.0] += 360
if self._coord_scale_to_deg is not None:
w1 = w1 / self._coord_scale_to_deg
w2 = w2 / self._coord_scale_to_deg
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)
if self.ticks.get_display_minor_ticks():
self._compute_ticks(
minor_ticks_w_coordinates,
spine,
axis,
w1,
w2,
tick_angle,
ticks="minor",
)
# format tick labels, add to scene
text = self.formatter(
self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing
)
for kwargs, txt in zip(self.lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _compute_ticks(
self, tick_world_coordinates, spine, axis, w1, w2, tick_angle, ticks="major"
):
if self.coord_type == "longitude":
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack(
[tick_world_coordinates_values, tick_world_coordinates_values + 360]
)
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(
self.coord_unit
)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid="ignore"):
intersections = np.hstack(
[
np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0],
]
)
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.0e-13, atol=1.0e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
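                    # frac is the fractional position of the tick value t
                    # between the two adjacent frame samples; the same fraction
                    # is used below to linearly interpolate the pixel position
                    # and the tick angle.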
x_data_i = spine.data[imin, 0] + frac * (
spine.data[imax, 0] - spine.data[imin, 0]
)
y_data_i = spine.data[imin, 1] + frac * (
spine.data[imax, 1] - spine.data[imin, 1]
)
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.0:
delta_angle -= 360.0
elif delta_angle < -180.0:
delta_angle += 360.0
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap)
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == "major":
self.ticks.add(
axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac,
)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self.lblinfo.append(
dict(
axis=axis,
data=(x_data_i, y_data_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac,
)
)
self.lbl_world.append(world)
else:
self.ticks.add_minor(
minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac,
)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
"""
self.ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self.minor_frequency
def set_minor_frequency(self, frequency):
"""
        Set the frequency of minor ticks per major tick.
Parameters
----------
frequency : int
            The number of minor ticks per major tick.
"""
self.minor_frequency = frequency
def _update_grid_lines_1d(self):
if self.coord_index is None:
return
x_ticks_pos = [a[0] for a in self.ticks.pixel["b"]]
ymin, ymax = self.parent_axes.get_ylim()
self.grid_lines = []
for x_coord in x_ticks_pos:
pixel = [[x_coord, ymin], [x_coord, ymax]]
self.grid_lines.append(Path(pixel))
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
# and drawing contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
if self.coord_index is None:
return
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
n_coord = len(tick_world_coordinates_values)
from . import conf
n_samples = conf.grid_samples
xy_world = np.zeros((n_samples * n_coord, 2))
self.grid_lines = []
for iw, w in enumerate(tick_world_coordinates_values):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
if self.coord_index == 0:
xy_world[subset, 0] = np.repeat(w, n_samples)
xy_world[subset, 1] = np.linspace(
coord_range[1][0], coord_range[1][1], n_samples
)
else:
xy_world[subset, 0] = np.linspace(
coord_range[0][0], coord_range[0][1], n_samples
)
xy_world[subset, 1] = np.repeat(w, n_samples)
# We now convert all the world coordinates to pixel coordinates in a
# single go rather than doing this in the gridline to path conversion
# to fully benefit from vectorized coordinate transformations.
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
for iw in range(n_coord):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
self.grid_lines.append(
self._get_gridline(
xy_world[subset], pixel[subset], xy_world_round[subset]
)
)
def add_tickable_gridline(self, name, constant):
"""
Define a gridline that can be used for ticks and labels.
This gridline is not itself drawn, but instead can be specified in calls to
methods such as
:meth:`~astropy.visualization.wcsaxes.coordinate_helpers.CoordinateHelper.set_ticklabel_position`
for drawing ticks and labels. Since the gridline has a constant value in this
coordinate, and thus would not have any ticks or labels for the same coordinate,
the call to
:meth:`~astropy.visualization.wcsaxes.coordinate_helpers.CoordinateHelper.set_ticklabel_position`
would typically be made on the complementary coordinate.
Parameters
----------
name : str
The name for the gridline, usually a single character, but can be longer
constant : `~astropy.units.Quantity`
The constant coordinate value of the gridline
Notes
-----
A limitation is that the tickable part of the gridline must be contiguous. If
the gridline consists of more than one disconnected segment within the plot
extent, only one of those segments will be made tickable.
"""
if self.coord_index is None:
return
if name in self.frame:
raise ValueError(f"The frame already has a spine with the name '{name}'")
coord_range = self.parent_map.get_coord_range()
constant = constant.to_value(self.coord_unit)
from . import conf
n_samples = conf.grid_samples
# See comment in _update_grid_lines() about a WCS with more than 2 axes
xy_world = np.zeros((n_samples, 2))
xy_world[:, self.coord_index] = np.repeat(constant, n_samples)
# If the complementary coordinate is longitude, we attempt to close the gridline
# If such closure is a discontinuity, it will be filtered out later
if self.parent_map[1 - self.coord_index].coord_type == "longitude":
xy_world[:-1, 1 - self.coord_index] = np.linspace(
coord_range[1 - self.coord_index][0],
coord_range[1 - self.coord_index][1],
n_samples - 1,
)
xy_world[-1, 1 - self.coord_index] = coord_range[1 - self.coord_index][0]
else:
xy_world[:, 1 - self.coord_index] = np.linspace(
coord_range[1 - self.coord_index][0],
coord_range[1 - self.coord_index][1],
n_samples,
)
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
# Get the path of the gridline, which masks hidden parts
gridline = self._get_gridline(xy_world, pixel, xy_world_round)
def data_for_spine(spine):
vertices = gridline.vertices.copy()
codes = gridline.codes.copy()
# Retain the parts of the gridline within the rectangular plot bounds.
# We ought to use the potentially non-rectangular plot frame, but
# calculating that patch requires updating all spines first, which is a
# catch-22.
xmin, xmax = spine.parent_axes.get_xlim()
ymin, ymax = spine.parent_axes.get_ylim()
keep = (
(vertices[:, 0] >= xmin)
& (vertices[:, 0] <= xmax)
& (vertices[:, 1] >= ymin)
& (vertices[:, 1] <= ymax)
)
codes[~keep] = Path.MOVETO
codes[1:][~keep[:-1]] = Path.MOVETO
# We isolate the last segment (the last run of LINETOs), which must be preceded
# by at least one MOVETO and may be succeeded by MOVETOs.
# We have to account for longitude wrapping as well.
# Bail out if there is no visible segment
lineto = np.flatnonzero(codes == Path.LINETO)
if np.size(lineto) == 0:
return np.zeros((0, 2))
# Find the start of the last segment (the last MOVETO before the LINETOs)
last_segment = np.flatnonzero(codes[: lineto[-1]] == Path.MOVETO)[-1]
# Double the gridline if it is closed (i.e., spans all longitudes)
if vertices[0, 0] == vertices[-1, 0] and vertices[0, 1] == vertices[-1, 1]:
codes = np.concatenate([codes, codes[1:]])
vertices = np.vstack([vertices, vertices[1:, :]])
# Stop the last segment before any trailing MOVETOs
moveto = np.flatnonzero(codes[last_segment + 1 :] == Path.MOVETO)
if np.size(moveto) > 0:
return vertices[last_segment : last_segment + moveto[0] + 1, :]
else:
return vertices[last_segment:n_samples, :]
self.frame[name] = self.frame.spine_class(
self.frame.parent_axes, self.frame.transform, data_func=data_for_spine
)
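    # Illustrative usage (a sketch, assuming ``ax`` is a WCSAxes instance over
    # a celestial WCS): make a line of constant declination tickable and label
    # the RA coordinate along it:
    #     ax.coords['dec'].add_tickable_gridline('const-dec', -30 * u.deg)
    #     ax.coords['ra'].set_ticklabel_position('const-dec')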
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == "scalar":
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _clear_grid_contour(self):
if hasattr(self, "_grid") and self._grid:
for line in self._grid.collections:
line.remove()
def _update_grid_contour(self):
if self.coord_index is None:
return
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
from . import conf
res = conf.contour_grid_samples
x, y = np.meshgrid(np.linspace(xmin, xmax, res), np.linspace(ymin, ymax, res))
pixel = np.array([x.ravel(), y.ravel()]).T
world = self.transform.transform(pixel)
field = world[:, self.coord_index].reshape(res, res).T
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
        # tick_world_coordinates is a Quantity array and we only need its values
tick_world_coordinates_values = tick_world_coordinates.value
if self.coord_type == "longitude":
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (
tick_world_coordinates_values[0] + tick_world_coordinates_values[1]
)
field = wrap_angle_at(field, mid)
tick_world_coordinates_values = wrap_angle_at(
tick_world_coordinates_values, mid
)
# Replace wraps by NaN
with np.errstate(invalid="ignore"):
reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (
np.abs(np.diff(field[:-1, :], axis=1)) > 180
)
field[:-1, :-1][reset] = np.nan
field[1:, :-1][reset] = np.nan
field[:-1, 1:][reset] = np.nan
field[1:, 1:][reset] = np.nan
if len(tick_world_coordinates_values) > 0:
with np.errstate(invalid="ignore"):
self._grid = self.parent_axes.contour(
x,
y,
field.transpose(),
levels=np.sort(tick_world_coordinates_values),
)
else:
self._grid = None
def tick_params(self, which="both", **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this will not work correctly if
the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this will not work
correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
# First do some sanity checking on the keyword arguments
# colors= is a fallback default for color and labelcolor
if "colors" in kwargs:
if "color" not in kwargs:
kwargs["color"] = kwargs["colors"]
if "labelcolor" not in kwargs:
kwargs["labelcolor"] = kwargs["colors"]
# The only property that can be set *specifically* for minor ticks is
# the length. In future we could consider having a separate Ticks instance
# for minor ticks so that e.g. the color can be set separately.
if which == "minor":
if len(set(kwargs) - {"length"}) > 0:
raise ValueError(
"When setting which='minor', the only "
"property that can be set at the moment is "
"'length' (the minor tick length)"
)
else:
if "length" in kwargs:
self.ticks.set_minor_ticksize(kwargs["length"])
return
# At this point, we can now ignore the 'which' argument.
# Set the tick arguments
self.set_ticks(
size=kwargs.get("length"),
width=kwargs.get("width"),
color=kwargs.get("color"),
direction=kwargs.get("direction"),
)
# Set the tick position
position = None
for arg in ("bottom", "left", "top", "right"):
if arg in kwargs and position is None:
position = ""
if kwargs.get(arg):
position += arg[0]
if position is not None:
self.set_ticks_position(position)
# Set the tick label arguments.
self.set_ticklabel(
color=kwargs.get("labelcolor"),
size=kwargs.get("labelsize"),
pad=kwargs.get("pad"),
)
# Set the tick label position
position = None
for arg in ("bottom", "left", "top", "right"):
if "label" + arg in kwargs and position is None:
position = ""
if kwargs.get("label" + arg):
position += arg[0]
if position is not None:
self.set_ticklabel_position(position)
# And the grid settings
if "grid_color" in kwargs:
self.grid_lines_kwargs["edgecolor"] = kwargs["grid_color"]
if "grid_alpha" in kwargs:
self.grid_lines_kwargs["alpha"] = kwargs["grid_alpha"]
if "grid_linewidth" in kwargs:
self.grid_lines_kwargs["linewidth"] = kwargs["grid_linewidth"]
if "grid_linestyle" in kwargs:
if kwargs["grid_linestyle"] in LINES_TO_PATCHES_LINESTYLE:
self.grid_lines_kwargs["linestyle"] = LINES_TO_PATCHES_LINESTYLE[
kwargs["grid_linestyle"]
]
else:
self.grid_lines_kwargs["linestyle"] = kwargs["grid_linestyle"]
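    # Illustrative usage (a sketch, assuming ``ax`` is a WCSAxes instance):
    #     ax.coords[0].tick_params(direction='in', length=8, colors='orange',
    #                              grid_color='gray', grid_alpha=0.3)
    # This is shorthand for the separate calls to set_ticks, set_ticklabel and
    # the grid keyword handling above.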
|
222e5178ffbebaa3f7483fa4d4f08aba6bc44924f81a0be78edfb5bce13e5530 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.lines
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib import rc_context
from matplotlib.figure import Figure
from matplotlib.patches import Circle, Rectangle
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.tests.figures import figure_test
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.visualization.wcsaxes import WCSAxes, add_beam, add_scalebar
from astropy.visualization.wcsaxes.frame import EllipticalFrame
from astropy.visualization.wcsaxes.patches import Quadrangle, SphericalCircle
from astropy.wcs import WCS
class BaseImageTests:
@classmethod
def setup_class(cls):
msx_header = get_pkg_data_filename("data/msx_header")
cls.msx_header = fits.Header.fromtextfile(msx_header)
rosat_header = get_pkg_data_filename("data/rosat_header")
cls.rosat_header = fits.Header.fromtextfile(rosat_header)
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
cls.twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
cube_header = get_pkg_data_filename("data/cube_header")
cls.cube_header = fits.Header.fromtextfile(cube_header)
slice_header = get_pkg_data_filename("data/slice_header")
cls.slice_header = fits.Header.fromtextfile(slice_header)
def teardown_method(self, method):
plt.close("all")
class TestBasic(BaseImageTests):
@figure_test
def test_tight_layout(self):
# Check that tight_layout works on a WCSAxes.
fig = plt.figure(figsize=(8, 6))
for i in (1, 2):
fig.add_subplot(2, 1, i, projection=WCS(self.msx_header))
fig.tight_layout()
return fig
@figure_test
def test_image_plot(self):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0.0, 0.20] * u.degree, size=5, width=1)
return fig
@figure_test
def test_axes_off(self):
# Test for turning the axes off
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header))
ax.imshow(np.arange(12).reshape((3, 4)))
ax.set_axis_off()
return fig
@figure_test
@pytest.mark.parametrize("axisbelow", [True, False, "line"])
def test_axisbelow(self, axisbelow):
# Test that tick marks, labels, and gridlines are drawn with the
# correct zorder controlled by the axisbelow property.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_axisbelow(axisbelow)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0.0, 0.20] * u.degree, size=5, width=1)
ax.grid()
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# Add an image (default zorder=0).
ax.imshow(np.zeros((64, 64)))
# Add a patch (default zorder=1).
r = Rectangle((30.0, 50.0), 60.0, 50.0, facecolor="green", edgecolor="red")
ax.add_patch(r)
# Add a line (default zorder=2).
ax.plot([32, 128], [32, 128], linewidth=10)
return fig
@figure_test
def test_contour_overlay(self):
# Test for overlaying contours on images
path = get_pkg_data_filename("galactic_center/gc_msx_e.fits")
with fits.open(path) as pf:
data = pf[0].data
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contour(
data,
transform=ax.get_transform(wcs_msx),
colors="orange",
levels=[2.5e-5, 5e-5, 1.0e-4],
)
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0.0, 720.0)
ax.set_ylim(0.0, 720.0)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
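        # (If hour-angle formatting were wanted instead, one could use e.g.
        # ax.coords[0].set_format_unit(u.hourangle); degrees are kept here
        # only to match the existing reference image.)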
return fig
@figure_test
def test_contourf_overlay(self):
# Test for overlaying contours on images
path = get_pkg_data_filename("galactic_center/gc_msx_e.fits")
with fits.open(path) as pf:
data = pf[0].data
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contourf(
data, transform=ax.get_transform(wcs_msx), levels=[2.5e-5, 5e-5, 1.0e-4]
)
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0.0, 720.0)
ax.set_ylim(0.0, 720.0)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_overlay_features_image(self):
# Test for overlaying grid, changing format of ticks, setting spacing
# and number of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.25, 0.25, 0.65, 0.65], projection=WCS(self.msx_header), aspect="equal"
)
# Change the format of the ticks
ax.coords[0].set_major_formatter("dd:mm:ss")
ax.coords[1].set_major_formatter("dd:mm:ss.ssss")
# Overlay grid on image
ax.grid(color="red", alpha=1.0, lw=1, linestyle="dashed")
# Set the spacing of ticks on the 'glon' axis to 4 arcsec
ax.coords["glon"].set_ticks(spacing=4 * u.arcsec, size=5, width=1)
# Set the number of ticks on the 'glat' axis to 9
ax.coords["glat"].set_ticks(number=9, size=5, width=1)
# Set labels on axes
ax.coords["glon"].set_axislabel("Galactic Longitude", minpad=1.6)
ax.coords["glat"].set_axislabel("Galactic Latitude", minpad=-0.75)
# Change the frame linewidth and color
ax.coords.frame.set_color("red")
ax.coords.frame.set_linewidth(2)
assert ax.coords.frame.get_color() == "red"
assert ax.coords.frame.get_linewidth() == 2
return fig
@figure_test
def test_curvilinear_grid_patches_image(self):
# Overlay curvilinear grid and patches on image
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.rosat_header), aspect="equal"
)
ax.set_xlim(-0.5, 479.5)
ax.set_ylim(-0.5, 239.5)
ax.grid(color="black", alpha=1.0, lw=1, linestyle="dashed")
p = Circle((300, 100), radius=40, ec="yellow", fc="none")
ax.add_patch(p)
p = Circle(
(30.0, 20.0),
radius=20.0,
ec="orange",
fc="none",
transform=ax.get_transform("world"),
)
ax.add_patch(p)
p = Circle(
(60.0, 50.0),
radius=20.0,
ec="red",
fc="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(p)
p = Circle(
(40.0, 60.0),
radius=20.0,
ec="green",
fc="none",
transform=ax.get_transform("galactic"),
)
ax.add_patch(p)
return fig
@figure_test
def test_cube_slice_image(self):
# Test for cube slicing
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_axislabel("Velocity m/s")
ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1)
ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[0].grid(grid_type="contours", color="purple", linestyle="solid")
ax.coords[1].grid(grid_type="contours", color="orange", linestyle="solid")
ax.coords[2].grid(grid_type="contours", color="red", linestyle="solid")
return fig
@figure_test
def test_cube_slice_image_lonlat(self):
# Test for cube slicing. Here we test with longitude and latitude since
# there is some longitude-specific code in _update_grid_contour.
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=("x", "y", 50),
aspect="equal",
)
ax.set_xlim(-0.5, 106.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].grid(grid_type="contours", color="blue", linestyle="solid")
ax.coords[1].grid(grid_type="contours", color="red", linestyle="solid")
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_plot_coord(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
lines = ax.plot_coord(c, "o")
# Test that plot_coord returns the results from ax.plot
assert isinstance(lines, list)
assert isinstance(lines[0], matplotlib.lines.Line2D)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_scatter_coord(self):
from matplotlib.collections import PathCollection
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
sc = ax.scatter_coord(c, marker="o")
        # Test that scatter_coord returns the results from ax.scatter
assert isinstance(sc, PathCollection)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_plot_line(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg)
ax.plot_coord(c)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_changed_axis_units(self):
# Test to see if changing the units of axis works
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].set_ticks_position("")
ax.coords[0].set_ticklabel_position("")
ax.coords[0].set_axislabel_position("")
ax.coords[1].set_ticks_position("lr")
ax.coords[1].set_ticklabel_position("l")
ax.coords[1].set_axislabel_position("l")
ax.coords[2].set_ticks_position("bt")
ax.coords[2].set_ticklabel_position("b")
ax.coords[2].set_axislabel_position("b")
ax.coords[2].set_major_formatter("x.xx")
ax.coords[2].set_format_unit(u.km / u.s)
ax.coords[2].set_axislabel("Velocity km/s")
ax.coords[1].set_ticks(width=1)
ax.coords[2].set_ticks(width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_minor_ticks(self):
# Test for drawing minor ticks
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].set_ticks_position("")
ax.coords[0].set_ticklabel_position("")
ax.coords[0].set_axislabel_position("")
ax.coords[1].set_ticks_position("lr")
ax.coords[1].set_ticklabel_position("l")
ax.coords[1].set_axislabel_position("l")
ax.coords[2].set_ticks_position("bt")
ax.coords[2].set_ticklabel_position("b")
ax.coords[2].set_axislabel_position("b")
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].display_minor_ticks(True)
ax.coords[1].display_minor_ticks(True)
ax.coords[2].set_minor_frequency(3)
ax.coords[1].set_minor_frequency(10)
return fig
@figure_test
def test_ticks_labels(self):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.coords[0].set_ticks(size=10, color="blue", alpha=0.2, width=1)
ax.coords[1].set_ticks(size=20, color="red", alpha=0.9, width=1)
ax.coords[0].set_ticks_position("all")
ax.coords[1].set_ticks_position("all")
ax.coords[0].set_axislabel("X-axis", size=20)
ax.coords[1].set_axislabel(
"Y-axis",
color="green",
size=25,
weight="regular",
style="normal",
family="cmtt10",
)
ax.coords[0].set_axislabel_position("t")
ax.coords[1].set_axislabel_position("r")
ax.coords[0].set_ticklabel(
color="purple",
size=15,
alpha=1,
weight="light",
style="normal",
family="cmss10",
)
ax.coords[1].set_ticklabel(
color="black", size=18, alpha=0.9, weight="bold", family="cmr10"
)
ax.coords[0].set_ticklabel_position("all")
ax.coords[1].set_ticklabel_position("r")
return fig
@figure_test
def test_rcparams(self):
# Test custom rcParams
with rc_context(
{
"axes.labelcolor": "purple",
"axes.labelsize": 14,
"axes.labelweight": "bold",
"axes.linewidth": 3,
"axes.facecolor": "0.5",
"axes.edgecolor": "green",
"xtick.color": "red",
"xtick.labelsize": 8,
"xtick.direction": "in",
"xtick.minor.visible": True,
"xtick.minor.size": 5,
"xtick.major.size": 20,
"xtick.major.width": 3,
"xtick.major.pad": 10,
"grid.color": "blue",
"grid.linestyle": ":",
"grid.linewidth": 1,
"grid.alpha": 0.5,
}
):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.15, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.grid()
ax.set_xlabel("X label")
ax.set_ylabel("Y label")
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_tick_angles(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels. Addresses #45, #46.
w = WCS()
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = "ICRS"
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color="gray", alpha=0.5, linestyle="solid")
ax.coords["ra"].set_ticks(color="red", size=20)
ax.coords["dec"].set_ticks(color="red", size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_tick_angles_non_square_axes(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels, and the axes are
# non-square.
w = WCS()
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = "ICRS"
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(6, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color="gray", alpha=0.5, linestyle="solid")
ax.coords["ra"].set_ticks(color="red", size=20)
ax.coords["dec"].set_ticks(color="red", size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_set_coord_type(self):
# Test for setting coord_type
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.2, 0.2, 0.6, 0.6], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_coord_type("scalar")
ax.coords[1].set_coord_type("scalar")
ax.coords[0].set_major_formatter("x.xxx")
ax.coords[1].set_major_formatter("x.xxx")
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_ticks_regression(self):
# Regression test for a bug that caused ticks aligned exactly with a
# sampled frame point to not appear. This also checks that tick labels
# don't get added more than once, and that no error occurs when e.g.
# the top part of the frame is all at the same coordinate as one of the
# potential ticks (which causes the tick angle calculation to return
# NaN).
wcs = WCS(self.slice_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="auto")
limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1]
ax.set_ylim(*limits)
ax.coords[0].set_ticks(spacing=0.002 * u.deg)
ax.coords[1].set_ticks(spacing=5 * u.km / u.s)
ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels
ax.coords[1].set_ticklabel(alpha=0.5)
ax.coords[0].set_ticklabel_position("all")
ax.coords[1].set_ticklabel_position("all")
return fig
@figure_test
def test_axislabels_regression(self):
# Regression test for a bug that meant that if tick labels were made
# invisible with ``set_visible(False)``, they were still added to the
# list of bounding boxes for tick labels, but with default values of 0
# to 1, which caused issues.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="auto")
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
ax.coords[1].set_axislabel_visibility_rule("always")
ax.coords[1].ticklabels.set_visible(False)
return fig
@figure_test(savefig_kwargs={"bbox_inches": "tight"})
def test_noncelestial_angular(self, tmp_path):
# Regression test for a bug that meant that when passing a WCS that had
# angular axes and using set_coord_type to set the coordinates to
# longitude/latitude, but where the WCS wasn't recognized as celestial,
# the WCS units are not converted to deg, so we can't assume that
# transform will always return degrees.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["solar-x", "solar-y"]
wcs.wcs.cunit = ["arcsec", "arcsec"]
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.zeros([1024, 1024]), origin="lower")
ax.coords[0].set_coord_type("longitude", coord_wrap=180)
ax.coords[1].set_coord_type("latitude")
ax.coords[0].set_major_formatter("s.s")
ax.coords[1].set_major_formatter("s.s")
ax.coords[0].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.coords[1].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.grid(color="white", ls="solid")
# Force drawing (needed for format_coord)
fig.savefig(tmp_path / "nothing")
assert ax.format_coord(512, 512) == "513.0 513.0 (world)"
return fig
@figure_test
def test_patches_distortion(self, tmp_path):
# Check how patches get distorted (and make sure that scatter markers
# and SphericalCircle don't)
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="equal")
# Pixel coordinates
r = Rectangle((30.0, 50.0), 60.0, 50.0, edgecolor="green", facecolor="none")
ax.add_patch(r)
# FK5 coordinates
r = Rectangle(
(266.4, -28.9),
0.3,
0.3,
edgecolor="cyan",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(r)
# FK5 coordinates
c = Circle(
(266.4, -29.1),
0.15,
edgecolor="magenta",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(c)
# Pixel coordinates
ax.scatter(
[40, 100, 130],
[30, 130, 60],
s=100,
edgecolor="red",
facecolor=(1, 0, 0, 0.5),
)
# World coordinates (should not be distorted)
ax.scatter(
266.78238,
-28.769255,
transform=ax.get_transform("fk5"),
s=300,
edgecolor="red",
facecolor="none",
)
# World coordinates (should not be distorted)
r1 = SphericalCircle(
(266.4 * u.deg, -29.1 * u.deg),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(r1)
r2 = SphericalCircle(
SkyCoord(266.4 * u.deg, -29.1 * u.deg),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
with pytest.warns(
AstropyUserWarning,
match="Received `center` of representation type "
"<class 'astropy.coordinates.representation.CartesianRepresentation'> " # noqa: E501
"will be converted to SphericalRepresentation",
):
r3 = SphericalCircle(
SkyCoord(
x=-0.05486461,
y=-0.87204803,
z=-0.48633538,
representation_type="cartesian",
),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
# Test to verify that SphericalCircle works irrespective of whether
# the input center is a tuple or a SkyCoord object.
assert (r1.get_xy() == r2.get_xy()).all()
assert np.allclose(r1.get_xy(), r3.get_xy())
assert np.allclose(r2.get_xy()[0], [266.4, -29.25])
return fig
@figure_test
def test_quadrangle(self, tmp_path):
# Test that Quadrangle can have curved edges while Rectangle does not
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="equal")
ax.set_xlim(0, 10000)
ax.set_ylim(-10000, 0)
# Add a quadrangle patch (100 degrees by 20 degrees)
q = Quadrangle(
(255, -70) * u.deg,
100 * u.deg,
20 * u.deg,
label="Quadrangle",
edgecolor="blue",
facecolor="none",
transform=ax.get_transform("icrs"),
)
ax.add_patch(q)
# Add a rectangle patch (100 degrees by 20 degrees)
r = Rectangle(
(255, -70),
100,
20,
label="Rectangle",
edgecolor="red",
facecolor="none",
linestyle="--",
transform=ax.get_transform("icrs"),
)
ax.add_patch(r)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
return fig
@figure_test
def test_beam_shape_from_args(self, tmp_path):
# Test for adding the beam shape with the beam parameters as arguments
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_beam(
ax,
major=2 * u.arcmin,
minor=1 * u.arcmin,
angle=-30 * u.degree,
corner="bottom right",
frame=True,
borderpad=0.0,
pad=1.0,
color="black",
)
return fig
@figure_test
def test_beam_shape_from_header(self, tmp_path):
# Test for adding the beam shape with the beam parameters from a header
hdr = self.msx_header
hdr["BMAJ"] = (2 * u.arcmin).to(u.degree).value
hdr["BMIN"] = (1 * u.arcmin).to(u.degree).value
hdr["BPA"] = 30.0
wcs = WCS(hdr)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_beam(ax, header=hdr)
return fig
@figure_test
def test_scalebar(self, tmp_path):
# Test for adding a scale bar
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_scalebar(
ax,
2 * u.arcmin,
label="2'",
corner="top right",
borderpad=1.0,
label_top=True,
)
return fig
@figure_test
def test_elliptical_frame(self):
# Regression test for a bug (astropy/astropy#6063) that caused labels to
# be incorrectly simplified.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(5, 3))
fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame)
return fig
@figure_test
def test_hms_labels(self):
# This tests the appearance of the hms superscripts in tick labels
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.3, 0.2, 0.65, 0.6], projection=WCS(self.twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@figure_test(style={"text.usetex": True})
def test_latex_labels(self):
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.3, 0.2, 0.65, 0.6], projection=WCS(self.twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@figure_test
def test_tick_params(self):
# This is a test to make sure that tick_params works correctly. We try
# and test as much as possible with a single reference image.
wcs = WCS()
wcs.wcs.ctype = ["lon", "lat"]
fig = plt.figure(figsize=(6, 6))
# The first subplot tests:
# - that plt.tick_params works
# - that by default both axes are changed
# - changing the tick direction and appearance, the label appearance and padding
ax = fig.add_subplot(2, 2, 1, projection=wcs)
plt.tick_params(
direction="in",
length=20,
width=5,
pad=6,
labelsize=6,
color="red",
labelcolor="blue",
)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The second subplot tests:
# - that specifying grid parameters doesn't actually cause the grid to
# be shown (as expected)
# - that axis= can be given integer coordinates or their string name
# - that the tick positioning works (bottom/left/top/right)
# Make sure that we can pass things that can index coords
ax = fig.add_subplot(2, 2, 2, projection=wcs)
plt.tick_params(
axis=0,
direction="in",
length=20,
width=5,
pad=4,
labelsize=6,
color="red",
labelcolor="blue",
bottom=True,
grid_color="purple",
)
plt.tick_params(
axis="lat",
direction="out",
labelsize=8,
color="blue",
labelcolor="purple",
left=True,
right=True,
grid_color="red",
)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The third subplot tests:
# - that ax.tick_params works
# - that the grid has the correct settings once shown explicitly
# - that we can use axis='x' and axis='y'
ax = fig.add_subplot(2, 2, 3, projection=wcs)
ax.tick_params(
axis="x",
direction="in",
length=20,
width=5,
pad=20,
labelsize=6,
color="red",
labelcolor="blue",
bottom=True,
grid_color="purple",
)
ax.tick_params(
axis="y",
direction="out",
labelsize=8,
color="blue",
labelcolor="purple",
left=True,
right=True,
grid_color="red",
)
plt.grid()
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The final subplot tests:
# - that we can use tick_params on a specific coordinate
# - that the label positioning can be customized
# - that the colors argument works
# - that which='minor' works
ax = fig.add_subplot(2, 2, 4, projection=wcs)
ax.coords[0].tick_params(
length=4,
pad=2,
colors="orange",
labelbottom=True,
labeltop=True,
labelsize=10,
)
ax.coords[1].display_minor_ticks(True)
ax.coords[1].tick_params(which="minor", length=6)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
return fig
@pytest.fixture
def wave_wcs_1d():
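# 1D spectral WCS: wavelength axis in metres, 5 m per pixel, value 45 m at reference pixel 1.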
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["WAVE"]
wcs.wcs.cunit = ["m"]
wcs.wcs.crpix = [1]
wcs.wcs.cdelt = [5]
wcs.wcs.crval = [45]
wcs.wcs.set()
return wcs
@figure_test
def test_1d_plot_1d_wcs(wave_wcs_1d):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
(lines,) = ax.plot([10, 12, 14, 12, 10])
ax.set_xlabel("this is the x-axis")
ax.set_ylabel("this is the y-axis")
return fig
@figure_test
def test_1d_plot_1d_wcs_format_unit(wave_wcs_1d):
"""
This test ensures that the format unit is updated and displayed for both
the axis ticks and default axis labels.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
(lines,) = ax.plot([10, 12, 14, 12, 10])
ax.coords[0].set_format_unit("nm")
return fig
@pytest.fixture
def spatial_wcs_2d():
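# 2D galactic TAN WCS with coarse 15 deg pixels and reference value (50, 50) deg at pixel (3, 3).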
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [15] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
return wcs
@figure_test
def test_1d_plot_2d_wcs_correlated(spatial_wcs_2d):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d, slices=("x", 0))
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
ax.coords["glon"].set_ticks(color="red")
ax.coords["glon"].set_ticklabel(color="red")
ax.coords["glon"].grid(color="red")
ax.coords["glat"].set_ticks(color="blue")
ax.coords["glat"].set_ticklabel(color="blue")
ax.coords["glat"].grid(color="blue")
return fig
@pytest.fixture
def spatial_wcs_2d_small_angle():
"""
This WCS has an almost linear correlation between the pixel and world axes
close to the reference pixel.
"""
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [10 / 3600, 5 / 3600]
wcs.wcs.crval = [0] * 2
wcs.wcs.set()
return wcs
@pytest.mark.parametrize(
"slices, bottom_axis",
[
# Remember SLLWCS takes slices in array order
(np.s_[0, :], "custom:pos.helioprojective.lon"),
(np.s_[:, 0], "custom:pos.helioprojective.lat"),
],
)
@figure_test
def test_1d_plot_1d_sliced_low_level_wcs(
spatial_wcs_2d_small_angle, slices, bottom_axis
):
"""
Test that slicing a coupled 2D WCS with a SlicedLowLevelWCS (SLLWCS) plots correctly as a line.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle[slices])
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
# Draw to trigger rendering the ticks.
plt.draw()
assert ax.coords[bottom_axis].ticks.get_visible_axes() == ["b"]
return fig
@pytest.mark.parametrize(
"slices, bottom_axis", [(("x", 0), "hpln"), ((0, "x"), "hplt")]
)
@figure_test
def test_1d_plot_put_varying_axis_on_bottom_lon(
spatial_wcs_2d_small_angle, slices, bottom_axis
):
"""
When we plot a 1D slice through spatial axes, we want to put the axis that
actually changes on the bottom.
For example, with an aligned WCS and pixel grid where you plot a lon slice
through a lat axis, you would otherwise end up with no ticks on the bottom
(the lon doesn't change) and a set of lat ticks on the top (the lat does
change, but it is the correlated axis, not the one you are plotting against).
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle, slices=slices)
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
# Draw to trigger rendering the ticks.
plt.draw()
assert ax.coords[bottom_axis].ticks.get_visible_axes() == ["b"]
return fig
@figure_test
def test_allsky_labels_wrap():
# Regression test for a bug that caused some tick labels to not be shown
# when looking at all-sky maps in the case where coord_wrap < 360
fig = plt.figure(figsize=(4, 4))
icen = 0
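# Loop over two coordinate systems (galactic and heliographic) and four central
# longitudes, creating one all-sky panel per combination (8 rows in total).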
for ctype in [("GLON-CAR", "GLAT-CAR"), ("HGLN-CAR", "HGLT-CAR")]:
for cen in [0, 90, 180, 270]:
icen += 1
wcs = WCS(naxis=2)
wcs.wcs.ctype = ctype
wcs.wcs.crval = cen, 0
wcs.wcs.crpix = 360.5, 180.5
wcs.wcs.cdelt = -0.5, 0.5
ax = fig.add_subplot(8, 1, icen, projection=wcs)
ax.set_xlim(-0.5, 719.5)
ax.coords[0].set_ticks(spacing=50 * u.deg)
ax.coords[0].set_ticks_position("b")
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
ax.coords[1].set_ticklabel_visible(False)
ax.coords[1].set_ticks_visible(False)
fig.subplots_adjust(hspace=2, left=0.05, right=0.95, bottom=0.1, top=0.95)
return fig
@figure_test
def test_tickable_gridlines():
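# All-sky plate carree (CAR) WCS with 1 deg pixels and the reference pixel at the image centre.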
wcs = WCS(
{
"naxis": 2,
"naxis1": 360,
"naxis2": 180,
"crpix1": 180.5,
"crpix2": 90.5,
"cdelt1": -1,
"cdelt2": 1,
"ctype1": "RA---CAR",
"ctype2": "DEC--CAR",
}
)
fig = Figure()
ax = fig.add_subplot(projection=wcs)
ax.set_xlim(-0.5, 360 - 0.5)
ax.set_ylim(-0.5, 150 - 0.5)
lon, lat = ax.coords
lon.grid()
lat.grid()
overlay = ax.get_coords_overlay("galactic")
overlay[0].set_ticks(spacing=30 * u.deg)
overlay[1].set_ticks(spacing=30 * u.deg)
# Test both single-character and multi-character names
overlay[1].add_tickable_gridline("g", -30 * u.deg)
overlay[0].add_tickable_gridline("const-glon", 30 * u.deg)
overlay[0].grid(color="magenta")
overlay[0].set_ticklabel_position("gt")
overlay[0].set_ticklabel(color="magenta")
overlay[0].set_axislabel("Galactic longitude", color="magenta")
overlay[1].grid(color="blue")
overlay[1].set_ticklabel_position(("const-glon", "r"))
overlay[1].set_ticklabel(color="blue")
overlay[1].set_axislabel("Galactic latitude", color="blue")
return fig
|
cd232a6fd2b18440bc6b94f8459293260ee047bf3e176b841d4b528c64096f01 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import datetime
import functools
import os
from copy import deepcopy
from decimal import Decimal, localcontext
from io import StringIO
import erfa
import numpy as np
import pytest
from erfa import ErfaWarning
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.table import Column, Table
from astropy.time import (
STANDARD_TIME_SCALES,
TIME_FORMATS,
ScaleValueError,
Time,
TimeDelta,
TimeString,
TimezoneInfo,
conf,
)
from astropy.utils import iers, isiterable
from astropy.utils.compat.optional_deps import HAS_H5PY, HAS_PYTZ
from astropy.utils.exceptions import AstropyDeprecationWarning
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps
) # 20 ps atol
allclose_sec = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps * 24 * 3600
)
allclose_year = functools.partial(
np.allclose, rtol=np.finfo(float).eps, atol=0.0
) # 14 microsec at current epoch
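# Save a pristine copy of Time.FORMATS before each test and restore it afterwards,
# so tests that register or modify time formats cannot leak state into later tests.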
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"]
t = Time(times, format="iso", scale="utc")
assert (
repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2, np.array([-0.5 + 1.4288980208333335e-06, -0.50000000e00])
)
# Set scale to TAI
t = t.tai
assert (
repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2,
np.array([-0.5 + 0.00037179926839122024, -0.5 + 0.00039351851851851852]),
)
# Get a new ``Time`` object which is referenced to the TT scale
# (internal jd1 and jd2 are now with respect to the TT scale)
assert (
repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>"
)
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
# array, depending on whether the input was a scalar or array
assert allclose_sec(
t.cxcsec, np.array([31536064.307456788, 378691266.18400002])
)
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format="jd")
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000.0, 2450010.0)
t2 = Time(val, format="jd")
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.0
t3 = Time(val, val2, format="jd")
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.0) / 10.0).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format="jd")
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize("format_", Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == "tai"
@pytest.mark.parametrize("value", [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format="jd", scale="utc")
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format="iso", scale="tai", precision=1)
assert t2.value == "2010-01-01 00:00:34.0"
t2 = Time(t, format="iso", scale="tai", out_subfmt="date")
assert t2.value == "2010-01-01"
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format="mjd", scale="utc", location=("45d", "50d"))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format="mjd", scale="utc")
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(
mjd,
format="mjd",
scale="utc",
location=(np.arange(len(mjd)), np.arange(len(mjd))),
)
t5a = t4[3]
assert t5a.location == t4.location[3]
assert t5a.location.shape == ()
t5b = t4[3:4]
assert t5b.location.shape == (1,)
# Check that indexing a size-1 array returns a scalar location as well;
# see gh-10113.
t5c = t5b[0]
assert t5c.location.shape == ()
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0.0, 0.0, 0.0), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0.0, 0.999, 0.2)
t7 = Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=("45d", "50d"),
)
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=(np.arange(len(frac)), np.arange(len(frac))),
)
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == "2010-01-01 00:00:00.000"
assert t.tt.iso == "2010-01-01 00:01:06.184"
assert t.tai.fits == "2010-01-01T00:00:34.000"
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == "2010-01-01T00:01:06.910"
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
# Uses initial class-defined precision=3
assert t.iso == "2010-01-01 00:00:00.000"
# Set instance precision to 9
t.precision = 9
assert t.iso == "2010-01-01 00:00:00.000000000"
assert t.tai.utc.iso == "2010-01-01 00:00:00.000000000"
def test_precision_input(self):
"""Verifies that precision can only be 0-9 (inclusive). Any other
value should raise a ValueError exception."""
err_message = "precision attribute must be an int"
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc", precision=10)
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.precision = -1
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
precision=7,
location=(lon, lat),
)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == "2006-01-15 21:24:37.5000000"
assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
assert t.tai.iso == "2006-01-15 21:25:10.5000000"
assert t.tt.iso == "2006-01-15 21:25:42.6840000"
assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
assert t.tdb.iso == "2006-01-15 21:25:42.6843728"
assert t.tcb.iso == "2006-01-15 21:25:56.8939523"
def test_transforms_no_location(self):
"""Location should default to geocenter (relevant for TDB, TCB)."""
t = Time("2006-01-15 21:24:37.5", format="iso", scale="utc", precision=7)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == "2006-01-15 21:24:37.5000000"
assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
assert t.tai.iso == "2006-01-15 21:25:10.5000000"
assert t.tt.iso == "2006-01-15 21:25:42.6840000"
assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
assert t.tdb.iso == "2006-01-15 21:25:42.6843725"
assert t.tcb.iso == "2006-01-15 21:25:56.8939519"
# Check we get the same result
t2 = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
location=(0 * u.m, 0 * u.m, 0 * u.m),
)
assert t == t2
assert t.tdb == t2.tdb
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=location,
)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(location.x, location.y, location.z),
)
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"] * 2,
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert np.all(t.utc.iso == "2006-01-15 21:24:37.500000")
assert np.all(t.tdb.iso[0] == "2006-01-15 21:25:42.684373")
t2 = Time(
["2006-01-15 21:24:37.5"] * 2,
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
assert np.all(t2.utc.iso == "2006-01-15 21:24:37.500000")
assert t2.tdb.iso[0] == "2006-01-15 21:25:42.684373"
assert t2.tdb.iso[1] != "2006-01-15 21:25:42.684373"
with pytest.raises(ValueError): # 1 time, but two locations
Time(
"2006-01-15 21:24:37.5",
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
with pytest.raises(ValueError): # 3 times, but two locations
Time(
["2006-01-15 21:24:37.5"] * 3,
format="iso",
scale="utc",
precision=6,
location=(np.array([lon, 0]), np.array([lat, 0])),
)
# multidimensional
mjd = np.arange(50000.0, 50008.0).reshape(4, 2)
t3 = Time(mjd, format="mjd", scale="utc", location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(
mjd,
format="mjd",
scale="utc",
location=(np.array([lon, 0]), np.array([lat, 0])),
)
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(
mjd,
format="mjd",
scale="utc",
location=(
np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]]),
),
)
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp("auto_download", False):
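# Auto-download of IERS data is disabled so the conversions (notably to/from
# UT1) do not require network access.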
for scale1 in STANDARD_TIME_SCALES:
t1 = Time(
"2006-01-15 21:24:37.5",
format="iso",
scale=scale1,
location=(lon, lat),
)
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = "local"
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format="decimalyear")
Time(100.0, format="cxcsec")
Time(100.0, format="unix")
Time(100.0, format="gps")
Time(1950.0, format="byear", scale="tai")
Time(2000.0, format="jyear", scale="tai")
Time("B1950.0", format="byear_str", scale="tai")
Time("J2000.0", format="jyear_str", scale="tai")
Time("2000-01-01 12:23:34.0", format="iso", scale="tai")
Time("2000-01-01 12:23:34.0Z", format="iso", scale="utc")
Time("2000-01-01T12:23:34.0", format="isot", scale="tai")
Time("2000-01-01T12:23:34.0Z", format="isot", scale="utc")
Time("2000-01-01T12:23:34.0", format="fits")
Time("2000-01-01T12:23:34.0", format="fits", scale="tdb")
Time(2400000.5, 51544.0333981, format="jd", scale="tai")
Time(0.0, 51544.0333981, format="mjd", scale="tai")
Time("2000:001:12:23:34.0", format="yday", scale="tai")
Time("2000:001:12:23:34.0Z", format="yday", scale="utc")
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format="datetime", scale="tai")
Time([dt, dt], format="datetime", scale="tai")
dt64 = np.datetime64("2012-06-18T02:00:05.453000000")
Time(dt64, format="datetime64", scale="tai")
Time([dt64, dt64], format="datetime64", scale="tai")
def test_local_format_transforms(self):
"""
Test transformation of local time to different formats.
Transformation to formats that have a reference epoch should raise
ScaleValueError.
"""
t = Time("2006-01-15 21:24:37.5", scale="local")
assert_allclose(t.jd, 2453751.3921006946, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
assert_allclose(
t.decimalyear,
2006.0408002758752,
atol=0.001 / 3600.0 / 24.0 / 365.0,
rtol=0.0,
)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == "2006-01-15T21:24:37.500"
assert t.yday == "2006:015:21:24:37.500"
assert t.fits == "2006-01-15T21:24:37.500"
assert_allclose(
t.byear, 2006.04217888831, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
)
assert_allclose(
t.jyear, 2006.0407723496082, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
)
assert t.byear_str == "B2006.042"
assert t.jyear_str == "J2006.041"
# epochTimeFormats
with pytest.raises(ScaleValueError):
t.gps
with pytest.raises(ScaleValueError):
t.unix
with pytest.raises(ScaleValueError):
t.cxcsec
with pytest.raises(ScaleValueError):
t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale="utc", precision=9)
assert t.iso == "2000-01-02 03:04:05.123456000"
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale="utc")
assert t2.datetime == dt
t = Time([dt, dt2], scale="utc")
assert np.all(t.value == [dt, dt2])
t = Time("2000-01-01 01:01:01.123456789", scale="tai")
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2 - dt) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale="utc")
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64("2000-01-02T03:04:05.123456789")
dt64_2 = np.datetime64("2000-01-02")
t = Time(dt64, scale="utc", precision=9, format="datetime64")
assert t.iso == "2000-01-02 03:04:05.123456789"
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale="utc")
assert t2.datetime64 == dt64
t = Time(dt64_2, scale="utc", precision=3, format="datetime64")
assert t.iso == "2000-01-02 00:00:00.000"
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale="utc")
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale="utc", format="datetime64")
assert np.all(t.value == [dt64, dt64_2])
t = Time("2000-01-01 01:01:01.123456789", scale="tai")
assert t.datetime64 == np.datetime64("2000-01-01T01:01:01.123456789")
# broadcasting
dt3 = (dt64 + (dt64_2 - dt64) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale="utc", format="datetime64")
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format="datetime64")
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format="datetime64"))
assert Time(t3[2, 0], format="datetime64") == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format="jd", scale="tai", precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == "B2015.136594"
assert t.jyear_str == "J2015.134993"
t2 = Time(t.byear, format="byear", scale="tai")
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format="jyear", scale="tai")
assert allclose_jd(t2.jd, jd)
t = Time("J2015.134993", scale="tai", precision=6)
assert np.allclose(
t.jd, jd, rtol=1e-10, atol=0
) # J2015.134993 has 10 digit precision
assert t.byear_str == "B2015.136594"
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format="iso", scale="utc")
with pytest.raises(ValueError):
Time("2000:001", format="jd", scale="utc")
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ["bad"], format="mjd", scale="tai")
with pytest.raises(ValueError):
Time(50000.0, "bad", format="mjd", scale="tai")
with pytest.raises(ValueError):
Time("2005-08-04T00:01:02.000Z", scale="tai")
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format="jd", scale="utc")
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time("2000-01-02T03:04:05(TAI)", scale="utc")
with pytest.raises(ValueError):
Time("2000-01-02T03:04:05(TAI")
with pytest.raises(ValueError):
Time("2000-01-02T03:04:05(UT(NIST)")
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f"{year:04d}-{month:02d}"
yyyy_mm_dd = f"{year:04d}-{month:02d}-{day:02d}"
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + "-01 23:59:60.0", scale="utc")
assert t1.iso == yyyy_mm + "-02 00:00:00.000"
# Leap second is different
t1 = Time(yyyy_mm_dd + " 23:59:59.900", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:59.900"
t1 = Time(yyyy_mm_dd + " 23:59:60.000", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:60.000"
t1 = Time(yyyy_mm_dd + " 23:59:60.999", scale="utc")
assert t1.iso == yyyy_mm_dd + " 23:59:60.999"
if month == 6:
yyyy_mm_dd_plus1 = f"{year:04d}-07-01"
else:
yyyy_mm_dd_plus1 = f"{year + 1:04d}-01-01"
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + " 23:59:61.0", scale="utc")
assert t1.iso == yyyy_mm_dd_plus1 + " 00:00:00.000"
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + " 23:59:59", scale="utc")
t1 = Time(yyyy_mm_dd_plus1 + " 00:00:00", scale="utc")
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time("2007:001", scale="tai")
t2 = Time(["2007-01-02", "2007-01-03"], scale="utc")
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale="utc")
assert t3.scale == "utc"
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale="tt")
assert t3.scale == "tt"
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000.0, 50006.0)
frac = np.arange(0.0, 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format="mjd", scale="utc")
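# t4 has shape (6, 5); stacking the (2, 5) and (1, 5) slices along the first axis gives (3, 5).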
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
Time(t1, scale="local")
class TestVal2:
"""Tests related to val2"""
@pytest.mark.parametrize(
"d",
[
dict(val="2001:001", val2="ignored", scale="utc"),
dict(
val={
"year": 2015,
"month": 2,
"day": 3,
"hour": 12,
"minute": 13,
"second": 14.567,
},
val2="ignored",
scale="utc",
),
dict(val=np.datetime64("2005-02-25"), val2="ignored", scale="utc"),
dict(
val=datetime.datetime(2000, 1, 2, 12, 0, 0), val2="ignored", scale="utc"
),
],
)
def test_unused_val2_raises(self, d):
"""Test that providing val2 is for string input lets user know we won't use it"""
with pytest.raises(ValueError):
Time(**d)
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format="mjd", scale="tai")
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000.0, 50007.0)
frac = np.arange(0.0, 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format="mjd", scale="utc")
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format="mjd", scale="tai")
def test_broadcast_not_writable(self):
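# t broadcasts val against val2 internally (which yields non-writable broadcast
# views), while t_b is built from explicitly expanded arrays; item assignment
# must behave identically for both.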
val = (2458000 + np.arange(3))[:, None]
val2 = np.linspace(0, 1, 4, endpoint=False)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1, 2] = t_i
t[1, 2] = t_i
assert t_b[1, 2] == t[1, 2], "writing worked"
assert t_b[0, 2] == t[0, 2], "broadcasting didn't cause problems"
assert t_b[1, 1] == t[1, 1], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
def test_broadcast_one_not_writable(self):
val = 2458000 + np.arange(3)
val2 = np.arange(1)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1] = t_i
t[1] = t_i
assert t_b[1] == t[1], "writing worked"
assert t_b[0] == t[0], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
class TestSubFormat:
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = [
"2000-01-01",
"2000-01-01 01:01",
"2000-01-01 01:01:01",
"2000-01-01 01:01:01.123",
]
t = Time(times, format="iso", scale="tai")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 00:00:00.000",
"2000-01-01 01:01:00.000",
"2000-01-01 01:01:01.000",
"2000-01-01 01:01:01.123",
]
)
)
# Heterogeneous input formats with in_subfmt='date_*'
times = ["2000-01-01 01:01", "2000-01-01 01:01:01", "2000-01-01 01:01:01.123"]
t = Time(times, format="iso", scale="tai", in_subfmt="date_*")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 01:01:00.000",
"2000-01-01 01:01:01.000",
"2000-01-01 01:01:01.123",
]
)
)
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time("2000-01-01 01:01", format="iso", scale="tai", in_subfmt="date")
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time(
"2000-01-01 01:01", format="iso", scale="tai", in_subfmt="doesnt exist"
)
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = [
"2000-01-01",
"2000-01-01 01:01",
"2000-01-01 01:01:01",
"2000-01-01 01:01:01.123",
]
t = Time(times, format="iso", scale="tai", out_subfmt="date_hm")
assert np.all(
t.iso
== np.array(
[
"2000-01-01 00:00",
"2000-01-01 01:01",
"2000-01-01 01:01",
"2000-01-01 01:01",
]
)
)
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ["2000-01-01", "2000-01-01T01:01:01", "2000-01-01T01:01:01.123"]
t = Time(times, format="fits", scale="tai")
assert np.all(
t.fits
== np.array(
[
"2000-01-01T00:00:00.000",
"2000-01-01T01:01:01.000",
"2000-01-01T01:01:01.123",
]
)
)
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format="fits", out_subfmt="long*")
assert np.all(
t2.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"+02000-01-01T01:01:01.123",
]
)
)
# Implicit long format for output, because of negative year.
times[2] = "-00594-01-01"
t3 = Time(times, format="fits", scale="tai")
assert np.all(
t3.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"-00594-01-01T00:00:00.000",
]
)
)
# Implicit long format for output, because of large positive year.
times[2] = "+10594-01-01"
t4 = Time(times, format="fits", scale="tai")
assert np.all(
t4.fits
== np.array(
[
"+02000-01-01T00:00:00.000",
"+02000-01-01T01:01:01.000",
"+10594-01-01T00:00:00.000",
]
)
)
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ["2000-12-01", "2001-12-01 01:01:01.123"]
t = Time(times, format="iso", scale="tai")
t.out_subfmt = "date_hm"
assert np.all(t.yday == np.array(["2000:336:00:00", "2001:335:01:01"]))
t.out_subfmt = "*"
assert np.all(
t.yday == np.array(["2000:336:00:00:00.000", "2001:335:01:01:01.123"])
)
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format="cxcsec", scale="utc")
assert t.scale == "utc"
t = Time(100.0, format="unix", scale="tai")
assert t.scale == "tai"
t = Time(100.0, format="gps", scale="utc")
assert t.scale == "utc"
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format="byear", scale="bad scale")
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time("2000:001:00:00:00", scale="bad scale")
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (
("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc"),
):
with pytest.warns(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][: inputs[0].index("(")], format="isot", scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:00.123456789(UTC)")
t = t.tai
assert t.isot == "1999-01-01T00:00:32.123"
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)")
t = t.utc
assert t.isot == "1999-01-01T00:00:00.123"
# Check scale consistency
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)", scale="tai")
assert t.scale == "tai"
with pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(ET)", scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError), pytest.warns(AstropyDeprecationWarning):
t = Time("1999-01-01T00:00:32.123456789(TAI)", scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format="cxcsec")
assert t.scale == "tt"
t = Time(100.0, format="unix")
assert t.scale == "utc"
t = Time(100.0, format="gps")
assert t.scale == "tai"
for date in ("2000:001", "2000-01-01T00:00:00"):
t = Time(date)
assert t.scale == "utc"
t = Time(2000.1, format="byear")
assert t.scale == "tt"
t = Time("J2000")
assert t.scale == "tt"
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format="cxcsec", scale="tai")
assert t.tt.iso == "1998-01-01 00:00:00.000"
# Create new time object from this one and change scale, format
t2 = Time(t, scale="tt", format="iso")
assert t2.value == "1998-01-01 00:00:00.000"
# Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format="cxcsec", scale="utc")
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == "2010:001:00:00:00.000"
t = Time("2010:001:00:00:00.000", scale="utc")
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Round trip through epoch time
for scale in ("utc", "tt"):
t = Time("2000:001", scale=scale)
t2 = Time(t.unix, scale=scale, format="unix")
assert getattr(t2, scale).iso == "2000-01-01 00:00:00.000"
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time("2013-05-20 21:18:46", scale="utc")
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time("2004-09-16T23:59:59", scale="utc")
assert allclose_sec(t.unix, 1095379199.0)
def test_plot_date(self):
"""Test the plot_date format.
Depending on the situation with matplotlib, this can give different
results because the plot date epoch time changed in matplotlib 3.3. This
test tries to use the matplotlib date2num function to make the test
independent of version, but if matplotlib isn't available then the code
(and test) use the pre-3.3 epoch.
"""
try:
from matplotlib.dates import date2num
except ImportError:
# No matplotlib, in which case this uses the epoch 0000-12-31
# as per matplotlib < 3.3.
# Value from:
# matplotlib.dates.set_epoch('0000-12-31')
# val = matplotlib.dates.date2num('2000-01-01')
val = 730120.0
else:
val = date2num(datetime.datetime(2000, 1, 1))
t = Time("2000-01-01 00:00:00", scale="utc")
assert np.allclose(t.plot_date, val, atol=1e-5, rtol=0)
class TestNumericalSubFormat:
def test_explicit_example(self):
t = Time("54321.000000000001", format="mjd")
assert t == Time(54321, 1e-12, format="mjd")
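# Internally the value is stored as two doubles (jd1 + jd2), so the extra
# 1e-12 day survives even though a single float64 cannot represent it.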
assert t.mjd == 54321.0 # Lost precision!
assert t.value == 54321.0 # Lost precision!
assert t.to_value("mjd") == 54321.0 # Lost precision!
assert t.to_value("mjd", subfmt="str") == "54321.000000000001"
assert t.to_value("mjd", "bytes") == b"54321.000000000001"
expected_long = np.longdouble(54321.0) + np.longdouble(1e-12)
# Check we're the same to within the double holding jd2
# (which is less precise than longdouble on arm64).
assert np.allclose(
t.to_value("mjd", subfmt="long"),
expected_long,
rtol=0,
atol=np.finfo(float).eps,
)
t.out_subfmt = "str"
assert t.value == "54321.000000000001"
assert t.to_value("mjd") == 54321.0 # Lost precision!
assert t.mjd == "54321.000000000001"
assert t.to_value("mjd", subfmt="bytes") == b"54321.000000000001"
assert t.to_value("mjd", subfmt="float") == 54321.0 # Lost precision!
t.out_subfmt = "long"
assert np.allclose(t.value, expected_long, rtol=0.0, atol=np.finfo(float).eps)
assert np.allclose(
t.to_value("mjd", subfmt=None),
expected_long,
rtol=0.0,
atol=np.finfo(float).eps,
)
assert np.allclose(t.mjd, expected_long, rtol=0.0, atol=np.finfo(float).eps)
assert t.to_value("mjd", subfmt="str") == "54321.000000000001"
assert t.to_value("mjd", subfmt="float") == 54321.0 # Lost precision!
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
def test_explicit_longdouble(self):
i = 54321
# Create a different long double (which will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
f = max(2.0 ** (-np.finfo(np.longdouble).nmant) * 65536, np.finfo(float).eps)
mjd_long = np.longdouble(i) + np.longdouble(f)
assert mjd_long != i, "longdouble failure!"
t = Time(mjd_long, format="mjd")
expected = Time(i, f, format="mjd")
assert abs(t - expected) <= 20.0 * u.ps
t_float = Time(i + f, format="mjd")
assert t_float == Time(i, format="mjd")
assert t_float != t
assert t.value == 54321.0 # Lost precision!
assert np.allclose(
t.to_value("mjd", subfmt="long"),
mjd_long,
rtol=0.0,
atol=np.finfo(float).eps,
)
t2 = Time(mjd_long, format="mjd", out_subfmt="long")
assert np.allclose(t2.value, mjd_long, rtol=0.0, atol=np.finfo(float).eps)
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
def test_explicit_longdouble_one_val(self):
"""Ensure either val1 or val2 being longdouble is possible.
Regression test for issue gh-10033.
"""
i = 54321
f = max(2.0 ** (-np.finfo(np.longdouble).nmant) * 65536, np.finfo(float).eps)
t1 = Time(i, f, format="mjd")
t2 = Time(np.longdouble(i), f, format="mjd")
t3 = Time(i, np.longdouble(f), format="mjd")
t4 = Time(np.longdouble(i), np.longdouble(f), format="mjd")
assert t1 == t2 == t3 == t4
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
@pytest.mark.parametrize("fmt", ["mjd", "unix", "cxcsec"])
def test_longdouble_for_other_types(self, fmt):
t_fmt = getattr(Time(58000, format="mjd"), fmt) # Get regular float
t_fmt_long = np.longdouble(t_fmt)
# Create a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
atol = np.finfo(float).eps * (1.0 if fmt == "mjd" else 24.0 * 3600.0)
t_fmt_long2 = t_fmt_long + max(
t_fmt_long * np.finfo(np.longdouble).eps * 2, atol
)
assert t_fmt_long != t_fmt_long2, "longdouble weird!"
tm = Time(t_fmt_long, format=fmt)
tm2 = Time(t_fmt_long2, format=fmt)
assert tm != tm2
tm_long2 = tm2.to_value(fmt, subfmt="long")
assert np.allclose(tm_long2, t_fmt_long2, rtol=0.0, atol=atol)
def test_subformat_input(self):
s = "54321.01234567890123456789"
i, f = s.split(".") # Note, OK only for fraction < 0.5
t = Time(float(i), float("." + f), format="mjd")
t_str = Time(s, format="mjd")
t_bytes = Time(s.encode("ascii"), format="mjd")
t_decimal = Time(Decimal(s), format="mjd")
assert t_str == t
assert t_bytes == t
assert t_decimal == t
@pytest.mark.parametrize("out_subfmt", ("str", "bytes"))
def test_subformat_output(self, out_subfmt):
i = 54321
f = np.array([0.0, 1e-9, 1e-12])
t = Time(i, f, format="mjd", out_subfmt=out_subfmt)
t_value = t.value
expected = np.array(
["54321.0", "54321.000000001", "54321.000000000001"], dtype=out_subfmt
)
assert np.all(t_value == expected)
assert np.all(Time(expected, format="mjd") == t)
# Explicit sub-format.
t = Time(i, f, format="mjd")
t_mjd_subfmt = t.to_value("mjd", subfmt=out_subfmt)
assert np.all(t_mjd_subfmt == expected)
@pytest.mark.parametrize(
"fmt,string,val1,val2",
[
("jd", "2451544.5333981", 2451544.5, 0.0333981),
("decimalyear", "2000.54321", 2000.0, 0.54321),
("cxcsec", "100.0123456", 100.0123456, None),
("unix", "100.0123456", 100.0123456, None),
("gps", "100.0123456", 100.0123456, None),
("byear", "1950.1", 1950.1, None),
("jyear", "2000.1", 2000.1, None),
],
)
def test_explicit_string_other_formats(self, fmt, string, val1, val2):
t = Time(string, format=fmt)
assert t == Time(val1, val2, format=fmt)
assert t.to_value(fmt, subfmt="str") == string
def test_basic_subformat_setting(self):
t = Time("2001", format="jyear", scale="tai")
t.format = "mjd"
t.out_subfmt = "str"
assert t.value.startswith("5")
def test_basic_subformat_cache_does_not_crash(self):
t = Time("2001", format="jyear", scale="tai")
t.to_value("mjd", subfmt="str")
assert ("mjd", "str") in t.cache["format"]
t.to_value("mjd", "str")
@pytest.mark.parametrize("fmt", ["jd", "mjd", "cxcsec", "unix", "gps", "jyear"])
def test_decimal_context_does_not_affect_string(self, fmt):
t = Time("2001", format="jyear", scale="tai")
t.format = fmt
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value(fmt, "str")
t2 = Time("2001", format="jyear", scale="tai")
t2.format = fmt
with localcontext() as ctx:
ctx.prec = 40
t2_s_40 = t.to_value(fmt, "str")
assert (
t_s_2 == t2_s_40
), "String representation should not depend on Decimal context"
def test_decimal_context_caching(self):
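# The Decimal output must not depend on the active decimal context precision,
# and the format cache must not hand back a value computed under a different precision.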
t = Time(val=58000, val2=1e-14, format="mjd", scale="tai")
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value("mjd", subfmt="decimal")
t2 = Time(val=58000, val2=1e-14, format="mjd", scale="tai")
with localcontext() as ctx:
ctx.prec = 40
t_s_40 = t.to_value("mjd", subfmt="decimal")
t2_s_40 = t2.to_value("mjd", subfmt="decimal")
assert t_s_2 == t_s_40, "Should be the same but cache might make this automatic"
assert t_s_2 == t2_s_40, "Different precision should produce the same results"
@pytest.mark.parametrize(
"f, s, t",
[
("sec", "long", np.longdouble),
("sec", "decimal", Decimal),
("sec", "str", str),
],
)
def test_timedelta_basic(self, f, s, t):
dt = Time("58000", format="mjd", scale="tai") - Time(
"58001", format="mjd", scale="tai"
)
value = dt.to_value(f, s)
assert isinstance(value, t)
dt.format = f
dt.out_subfmt = s
assert isinstance(dt.value, t)
assert isinstance(dt.to_value(f, None), t)
def test_need_format_argument(self):
t = Time("J2000")
with pytest.raises(TypeError, match="missing.*required.*'format'"):
t.to_value()
with pytest.raises(ValueError, match="format must be one of"):
t.to_value("julian")
def test_wrong_in_subfmt(self):
with pytest.raises(ValueError, match="not among selected"):
Time("58000", format="mjd", in_subfmt="float")
with pytest.raises(ValueError, match="not among selected"):
Time(np.longdouble(58000), format="mjd", in_subfmt="float")
with pytest.raises(ValueError, match="not among selected"):
Time(58000.0, format="mjd", in_subfmt="str")
with pytest.raises(ValueError, match="not among selected"):
Time(58000.0, format="mjd", in_subfmt="long")
def test_wrong_subfmt(self):
t = Time(58000.0, format="mjd")
with pytest.raises(ValueError, match="must match one"):
t.to_value("mjd", subfmt="parrot")
with pytest.raises(ValueError, match="must match one"):
t.out_subfmt = "parrot"
with pytest.raises(ValueError, match="must match one"):
t.in_subfmt = "parrot"
def test_not_allowed_subfmt(self):
"""Test case where format has no defined subfmts"""
t = Time("J2000")
match = "subformat not allowed for format jyear_str"
with pytest.raises(ValueError, match=match):
t.to_value("jyear_str", subfmt="parrot")
with pytest.raises(ValueError, match=match):
t.out_subfmt = "parrot"
with pytest.raises(ValueError, match=match):
Time("J2000", out_subfmt="parrot")
with pytest.raises(ValueError, match=match):
t.in_subfmt = "parrot"
with pytest.raises(ValueError, match=match):
Time("J2000", format="jyear_str", in_subfmt="parrot")
def test_switch_to_format_with_no_out_subfmt(self):
t = Time("2001-01-01", out_subfmt="date_hm")
assert t.out_subfmt == "date_hm"
# Now do an in-place switch to format 'jyear_str' that has no subfmts
# where out_subfmt is changed to '*'.
t.format = "jyear_str"
assert t.out_subfmt == "*"
assert t.value == "J2001.001"
class TestSofaErrors:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with pytest.warns(ErfaWarning, match=r"bad day \(JD computed\)") as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.0])
class TestCopyReplicate:
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format="jd", scale="tai")
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format="mjd", scale="tai")
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(["2000:001"], format="yday", scale="tai", location=("45d", "45d"))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time("2000:001", format="yday", scale="tai", location=("45d", "45d"))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
        assert t.location.x == t_loc_x  # prove that it did not change
class TestStardate:
"""Sync chronometers with Starfleet Command"""
def test_iso_to_stardate(self):
assert str(Time("2320-01-01", scale="tai").stardate)[:7] == "1368.99"
assert str(Time("2330-01-01", scale="tai").stardate)[:8] == "10552.76"
assert str(Time("2340-01-01", scale="tai").stardate)[:8] == "19734.02"
@pytest.mark.parametrize(
"dates",
[
(10000, "2329-05-26 03:02"),
(20000, "2340-04-15 19:05"),
(30000, "2351-03-07 11:08"),
],
)
def test_stardate_to_iso(self, dates):
stardate, iso = dates
t_star = Time(stardate, format="stardate")
t_iso = Time(t_star, format="iso", out_subfmt="date_hm")
assert t_iso.value == iso
def test_python_builtin_copy():
t = Time("2000:001", format="yday", scale="tai")
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == "datetime"
assert t.scale == "utc"
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time("2001:001", format="yday")
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format="decimalyear")
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time("2000:001").jd
jd1 = Time("2001:001").jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd, jd0 + 0.75 * d_jd])
def test_fits_year0():
t = Time(1721425.5, format="jd", scale="tai")
assert t.fits == "0001-01-01T00:00:00.000"
t = Time(1721425.5 - 366.0, format="jd", scale="tai")
assert t.fits == "+00000-01-01T00:00:00.000"
t = Time(1721425.5 - 366.0 - 365.0, format="jd", scale="tai")
assert t.fits == "-00001-01-01T00:00:00.000"
def test_fits_year10000():
t = Time(5373484.5, format="jd", scale="tai")
assert t.fits == "+10000-01-01T00:00:00.000"
t = Time(5373484.5 - 365.0, format="jd", scale="tai")
assert t.fits == "9999-01-01T00:00:00.000"
t = Time(5373484.5, -1.0 / 24.0 / 3600.0, format="jd", scale="tai")
assert t.fits == "9999-12-31T23:59:59.000"
def test_dir():
t = Time("2000:001", format="yday", scale="tai")
assert "utc" in dir(t)
def test_time_from_epoch_jds():
"""Test that jd1/jd2 in a TimeFromEpoch format is always well-formed:
jd1 is an integral value and abs(jd2) <= 0.5.
"""
# From 1999:001 00:00 to 1999:002 12:00 by a non-round step. This will
# catch jd2 == 0 and a case of abs(jd2) == 0.5.
cxcsecs = np.linspace(0, 86400 * 1.5, 49)
for cxcsec in cxcsecs:
t = Time(cxcsec, format="cxcsec")
assert np.round(t.jd1) == t.jd1
assert np.abs(t.jd2) <= 0.5
t = Time(cxcsecs, format="cxcsec")
assert np.all(np.round(t.jd1) == t.jd1)
assert np.all(np.abs(t.jd2) <= 0.5)
assert np.any(np.abs(t.jd2) == 0.5) # At least one exactly 0.5
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format="mjd", scale="utc")
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format="mjd", scale="utc")
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format="mjd", scale="utc")
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert "Time" in str(err.value)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time("1900-01-01", scale="ut1")
t.delta_ut1_utc = 0.0
with pytest.warns(ErfaWarning):
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion(monkeypatch):
# Check that if we have internet, and downloading is allowed, we
# can get conversion to UT1 for the present, since we will download
# IERS_A in IERS_Auto.
monkeypatch.setattr("astropy.utils.iers.conf.auto_download", True)
Time(Time.now().cxcsec, format="cxcsec", scale="ut1")
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype(">f8")
little_endian = mjd.astype("<f8")
time_mjd = Time(mjd, format="mjd")
time_big = Time(big_endian, format="mjd")
time_little = Time(little_endian, format="mjd")
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
    Test having custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = "longyear"
subfmts = (
(
"date",
r"(?P<year>[+-]\d{5})-%m-%d", # hybrid
"{year:+06d}-{mon:02d}-{day:02d}",
),
)
t = Time("+02000-02-03", format="longyear")
assert t.value == "+02000-02-03"
assert t.jd == Time("2000-02-03").jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (
("jd", 2451577.5),
("mjd", 51577.0),
("cxcsec", 65923264.184), # confirmed with Chandra.Time
("datetime", datetime.datetime(2000, 2, 3, 0, 0)),
("iso", "2000-02-03 00:00:00.000"),
):
t = Time("+02000-02-03", format="fits")
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_unix_tai_format():
t = Time("2020-01-01", scale="utc")
assert allclose_sec(t.unix_tai - t.unix, 37.0)
t = Time("1970-01-01", scale="utc")
assert allclose_sec(t.unix_tai - t.unix, 8 + 8.2e-05)
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time("+02000-02-03", format="fits", out_subfmt="date_hms", precision=5)
tc = t.copy()
t.format = "isot"
assert t.precision == 5
assert t.out_subfmt == "date_hms"
assert t.value == "2000-02-03T00:00:00.00000"
t.format = "fits"
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time("+02000-02-03", format="fits", out_subfmt="longdate")
t.format = "isot"
assert t.out_subfmt == "*" # longdate_hms not there, goes to default
assert t.value == "2000-02-03T00:00:00.000"
t.format = "fits"
assert t.out_subfmt == "*"
assert t.value == "2000-02-03T00:00:00.000" # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time("2007:001", scale="tai")
with pytest.raises(ValueError) as err:
t1.replicate(format="definitely_not_a_valid_format")
assert "format must be one of" in str(err.value)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time("2007:001", scale="tai")
assert "astropy_time" not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format="astropy_time")
assert "format must be one of" in str(err.value)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(
["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"],
format="iso",
scale="utc",
)
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10 * u.hour, tzname="US/Hawaii")
    # The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time("2010-09-03 00:00:00")
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1990-09-03 06:00:00"])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError, match=r"does not support leap seconds"):
Time("2015-06-30 23:59:60.000").to_datetime()
@pytest.mark.skipif(not HAS_PYTZ, reason="requires pytz")
def test_to_datetime_pytz():
import pytz
tz = pytz.timezone("US/Hawaii")
time = Time("2010-09-03 00:00:00")
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1990-09-03 06:00:00"])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time("2010-09-03 00:00:00")
t2 = Time("2010-09-03 00:00:00")
# Time starts out without a cache
assert "cache" not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache["format"]["iso"] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache["scale"]["tai"] == t2.tai
# New Time object after scale transform does not have a cache yet
assert "cache" not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert "cache" not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert "cache" in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
    Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [
[[f"{y:04d}-{m:02d}-{d:02d}" for d in range(1, 3)] for m in range(5, 7)]
for y in range(2012, 2014)
]
cutf32 = Column(times)
cbytes = cutf32.astype("S")
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(["B1950"]))
tbytes = Time(Column([b"B1950"]))
assert tutf32 == tbytes
# Regression tests for arrays with entries with unequal length. gh-6903.
times = Column([b"2012-01-01", b"2012-01-01T00:00:00"])
assert np.all(Time(times) == Time(["2012-01-01", "2012-01-01T00:00:00"]))
def test_bytes_input():
tstring = "2011-01-02T03:04:05"
tbytes = b"2011-01-02T03:04:05"
assert tbytes.decode("ascii") == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == "S"
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format="cxcsec")
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert "Time object is read-only. Make a copy()" in str(err.value)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert "Time object is read-only. Make a copy()" in str(err.value)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is writeable because it gets boxed into a zero-d array
t = Time("2000:001", scale="utc")
t[()] = "2000:002"
assert t.value.startswith("2000:002")
# Transformed attribute is not writeable
t = Time(["2000:001", "2000:002"], scale="utc")
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = "2005:001"
assert "Time object is read-only. Make a copy()" in str(err.value)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format="cxcsec", location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format="cxcsec")
assert (
"cannot set to Time with different location: "
"expected location={} and "
"got location=None".format(loc[0]) in str(err.value)
)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format="cxcsec", location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format="cxcsec", location=loc[1])
assert (
"cannot set to Time with different location: "
"expected location={} and "
"got location={}".format(loc[0], loc[1]) in str(err.value)
)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format="cxcsec")
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format="cxcsec", location=loc[1])
assert (
"cannot set to Time with different location: "
"expected location=None and "
"got location={}".format(loc[1]) in str(err.value)
)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format="cxcsec", location=loc)
t[0, :] = Time([-3, -4], format="cxcsec", location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format="cxcsec")
assert t.cache == {}
t.iso
assert "iso" in t.cache["format"]
assert np.all(
t.iso
== [
["1998-01-01 00:00:01.000", "1998-01-01 00:00:02.000"],
["1998-01-01 00:00:03.000", "1998-01-01 00:00:04.000"],
]
)
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100], [3, 4]])
assert np.all(
t.iso
== [
["1998-01-01 00:00:01.000", "1998-01-01 00:01:40.000"],
["1998-01-01 00:00:03.000", "1998-01-01 00:00:04.000"],
]
)
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100], [200, 200]])
# Array of strings in yday format
t[:, 1] = ["1998:002", "1998:003"]
assert allclose_sec(t.value, [[1, 86400 * 1], [200, 86400 * 2]])
# Incompatible numeric value
t = Time(["2000:001", "2000:002"])
t[0] = "2001:001"
with pytest.raises(ValueError) as err:
t[0] = 100
assert "cannot convert value to a compatible Time object" in str(err.value)
def test_setitem_from_time_objects():
"""Set from existing Time object."""
# Set from time object with different scale
t = Time(["2000:001", "2000:002"], scale="utc")
t2 = Time(["2000:010"], scale="tai")
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(["2000:001", "2000:002"], scale="utc")
t2.format = "jyear"
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format="cxcsec")
with pytest.raises(IndexError):
t["asdf"] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format="cxcsec")
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, "_delta_tdb_tt")
assert not hasattr(t, "_delta_ut1_utc")
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-#7449 and PR gh-#7453.
class _Time(Time):
pass
t1 = Time("1999-01-01T01:01:01")
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime"""
time_string = "2010-09-03 06:00:00"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
def test_strftime_array():
tstrings = ["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1995-12-31 23:59:60"]
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S").tolist() == tstrings
def test_strftime_array_2():
tstrings = [
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1995-12-31 23:59:60"],
]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime("%Y-%m-%d %H:%M:%S") == tstrings)
assert t.strftime("%Y-%m-%d %H:%M:%S").shape == tstrings.shape
def test_strftime_leapsecond():
time_string = "1995-12-31 23:59:60"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
def test_strptime_scalar():
"""Test of Time.strptime"""
time_string = "2007-May-04 21:08:12"
time_object = Time("2007-05-04 21:08:12")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S")
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime"""
tstrings = [
["1998-Jan-01 00:00:01", "1998-Jan-01 00:00:02"],
["1998-Jan-01 00:00:03", "1998-Jan-01 00:00:04"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1998-01-01 00:00:04"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, "%S")
def test_strptime_input_bytes_scalar():
time_string = b"2007-May-04 21:08:12"
time_object = Time("2007-05-04 21:08:12")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S")
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [
[b"1998-Jan-01 00:00:01", b"1998-Jan-01 00:00:02"],
[b"1998-Jan-01 00:00:03", b"1998-Jan-01 00:00:04"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
["1998-01-01 00:00:03", "1998-01-01 00:00:04"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time("1995-12-31T23:59:60", format="isot")
time_obj2 = Time.strptime("1995-Dec-31 23:59:60", "%Y-%b-%d %H:%M:%S")
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time("0995-12-31T00:00:00", format="isot", scale="tai")
time_obj2 = Time.strptime("0995-Dec-31 00:00:00", "%Y-%b-%d %H:%M:%S", scale="tai")
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = "2007-May-04 21:08:12.123"
time_object = Time("2007-05-04 21:08:12.123")
t = Time.strptime(time_string, "%Y-%b-%d %H:%M:%S.%f")
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime"""
tstrings = [
["1998-Jan-01 00:00:01.123", "1998-Jan-01 00:00:02.000001"],
["1998-Jan-01 00:00:03.000900", "1998-Jan-01 00:00:04.123456"],
]
tstrings = np.array(tstrings)
time_object = Time(
[
["1998-01-01 00:00:01.123", "1998-01-01 00:00:02.000001"],
["1998-01-01 00:00:03.000900", "1998-01-01 00:00:04.123456"],
]
)
t = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S.%f")
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime"""
time_string = "2010-09-03 06:00:00.123"
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == time_string
def test_strftime_scalar_fracsec_precision():
time_string = "2010-09-03 06:00:00.123123123"
t = Time(time_string)
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == "2010-09-03 06:00:00.123"
t.precision = 9
assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == "2010-09-03 06:00:00.123123123"
def test_strftime_array_fracsec():
tstrings = [
"2010-09-03 00:00:00.123000",
"2005-09-03 06:00:00.000001",
"1995-12-31 23:59:60.000900",
]
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime("%Y-%m-%d %H:%M:%S.%f").tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format="unix")
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, "1970-01-01 00:01:00")
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time("1970-01-01 00:01:00"))
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time("1970-01-01 00:01:00")])
assert np.all(tm2 == Time([1, 60, 2], format="unix"))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format="unix"))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format="unix"))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format="unix"))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format="unix"))
def test_insert_time_out_subfmt():
# Check insert() with out_subfmt set
T = Time(["1999-01-01", "1999-01-02"], out_subfmt="date")
T = T.insert(0, T[0])
assert T.out_subfmt == "date"
assert T[0] == T[1]
T = T.insert(1, "1999-01-03")
assert T.out_subfmt == "date"
assert str(T[1]) == "1999-01-03"
def test_insert_exceptions():
tm = Time(1, format="unix")
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert "cannot insert into scalar" in str(err.value)
tm = Time([1, 2], format="unix")
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert "axis must be 0" in str(err.value)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert "obj arg must be an integer" in str(err.value)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert "index -100 is out of bounds for axis 0 with size 2" in str(err.value)
def test_datetime64_no_format():
dt64 = np.datetime64("2000-01-02T03:04:05.123456789")
t = Time(dt64, scale="utc", precision=9)
assert t.iso == "2000-01-02 03:04:05.123456789"
assert t.datetime64 == dt64
assert t.value == dt64
def test_hash_time():
loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
for loc in None, loc1:
t = Time([1, 1, 2, 3], format="cxcsec", location=loc)
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"
t = Time(1, format="cxcsec", location=loc)
t2 = Time(1, format="cxcsec")
assert hash(t) != hash(t2)
t = Time("2000:180", scale="utc")
t2 = Time(t, scale="tai")
assert t == t2
assert hash(t) != hash(t2)
def test_hash_time_delta():
t = TimeDelta([1, 1, 2, 3], format="sec")
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
def test_get_time_fmt_exception_messages():
with pytest.raises(ValueError) as err:
Time(10)
assert "No time format was given, and the input is" in str(err.value)
with pytest.raises(ValueError) as err:
Time("2000:001", format="not-a-format")
assert "Format 'not-a-format' is not one of the allowed" in str(err.value)
with pytest.raises(ValueError) as err:
Time("200")
assert "Input values did not match any of the formats where" in str(err.value)
with pytest.raises(ValueError) as err:
Time("200", format="iso")
assert (
"Input values did not match the format class iso:"
+ os.linesep
+ "ValueError: Time 200 does not match iso format"
) == str(err.value)
with pytest.raises(ValueError) as err:
Time(200, format="iso")
assert (
"Input values did not match the format class iso:"
+ os.linesep
+ "TypeError: Input values for iso class must be strings"
) == str(err.value)
def test_ymdhms_defaults():
t1 = Time({"year": 2001}, format="ymdhms")
assert t1 == Time("2001-01-01")
times_dict_ns = {
"year": [2001, 2002],
"month": [2, 3],
"day": [4, 5],
"hour": [6, 7],
"minute": [8, 9],
"second": [10, 11],
}
table_ns = Table(times_dict_ns)
struct_array_ns = table_ns.as_array()
rec_array_ns = struct_array_ns.view(np.recarray)
ymdhms_names = ("year", "month", "day", "hour", "minute", "second")
@pytest.mark.parametrize("tm_input", [table_ns, struct_array_ns, rec_array_ns])
@pytest.mark.parametrize("kwargs", [{}, {"format": "ymdhms"}])
@pytest.mark.parametrize("as_row", [False, True])
def test_ymdhms_init_from_table_like(tm_input, kwargs, as_row):
time_ns = Time(["2001-02-04 06:08:10", "2002-03-05 07:09:11"])
if as_row:
tm_input = tm_input[0]
time_ns = time_ns[0]
tm = Time(tm_input, **kwargs)
assert np.all(tm == time_ns)
assert tm.value.dtype.names == ymdhms_names
def test_ymdhms_init_from_dict_array():
times_dict_shape = {"year": [[2001, 2002], [2003, 2004]], "month": [2, 3], "day": 4}
time_shape = Time([["2001-02-04", "2002-03-04"], ["2003-02-04", "2004-03-04"]])
time = Time(times_dict_shape, format="ymdhms")
assert np.all(time == time_shape)
assert time.ymdhms.shape == time_shape.shape
@pytest.mark.parametrize("kwargs", [{}, {"format": "ymdhms"}])
def test_ymdhms_init_from_dict_scalar(kwargs):
"""
Test YMDHMS functionality for a dict input. This includes ensuring that
key and attribute access work. For extra fun use a time within a leap
second.
"""
time_dict = {
"year": 2016,
"month": 12,
"day": 31,
"hour": 23,
"minute": 59,
"second": 60.123456789,
}
tm = Time(time_dict, **kwargs)
assert tm == Time("2016-12-31T23:59:60.123456789")
for attr in time_dict:
for value in (tm.value[attr], getattr(tm.value, attr)):
if attr == "second":
assert allclose_sec(time_dict[attr], value)
else:
assert time_dict[attr] == value
# Now test initializing from a YMDHMS format time using the object
tm_rt = Time(tm)
assert tm_rt == tm
assert tm_rt.format == "ymdhms"
# Test initializing from a YMDHMS value (np.void, i.e. recarray row)
# without specified format.
tm_rt = Time(tm.ymdhms)
assert tm_rt == tm
assert tm_rt.format == "ymdhms"
def test_ymdhms_exceptions():
with pytest.raises(ValueError, match="input must be dict or table-like"):
Time(10, format="ymdhms")
match = "'wrong' not allowed as YMDHMS key name(s)"
    # NB: pytest.raises(match=...) treats the string as a regex, so the literal
    # parentheses in "(s)" do not match; fall back to old school
    # ``match in str(err.value)``.
with pytest.raises(ValueError) as err:
Time({"year": 2019, "wrong": 1}, format="ymdhms")
assert match in str(err.value)
match = "for 2 input key names you must supply 'year', 'month'"
with pytest.raises(ValueError, match=match):
Time({"year": 2019, "minute": 1}, format="ymdhms")
def test_ymdhms_masked():
tm = Time({"year": [2000, 2001]}, format="ymdhms")
tm[0] = np.ma.masked
assert isinstance(tm.value[0], np.ma.core.mvoid)
for name in ymdhms_names:
assert tm.value[0][name] is np.ma.masked
# Converted from doctest in astropy/test/formats.py for debugging
def test_ymdhms_output():
t = Time(
{
"year": 2015,
"month": 2,
"day": 3,
"hour": 12,
"minute": 13,
"second": 14.567,
},
scale="utc",
)
# NOTE: actually comes back as np.void for some reason
# NOTE: not necessarily a python int; might be an int32
assert t.ymdhms.year == 2015
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_ecsv(fmt):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t["a"].format == t2["a"].format
# Some loss of precision in the serialization
assert not np.all(t["a"] == t2["a"])
# But no loss in the format representation
assert np.all(t["a"].value == t2["a"].value)
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_fits(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = tmp_path / "out.fits"
t.write(out, format="fits")
t2 = Table.read(out, format="fits", astropy_native=True)
# Currently the format is lost in FITS so set it back
t2["a"].format = fmt
# No loss of precision in the serialization or representation
assert np.all(t["a"] == t2["a"])
assert np.all(t["a"].value == t2["a"].value)
@pytest.mark.skipif(not HAS_H5PY, reason="Needs h5py")
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_hdf5(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
tm.format = fmt
t["a"] = tm
out = tmp_path / "out.h5"
t.write(str(out), format="hdf5", path="root", serialize_meta=True)
t2 = Table.read(str(out), format="hdf5", path="root")
assert t["a"].format == t2["a"].format
# No loss of precision in the serialization or representation
assert np.all(t["a"] == t2["a"])
assert np.all(t["a"].value == t2["a"].value)
# There are two stages of validation now - one on input into a format, so that
# the format conversion code has tidy matched arrays to work with, and the
# other when object construction does not go through a format object. Or at
# least, the format object is constructed with "from_jd=True". In this case the
# normal input validation does not happen but the new input validation does,
# and can ensure that strange broadcasting anomalies can't happen.
# This form of construction uses from_jd=True.
def test_broadcasting_writeable():
t = Time("J2015") + np.linspace(-1, 1, 10) * u.day
t[2] = Time(58000, format="mjd")
def test_format_subformat_compatibility():
"""Test that changing format with out_subfmt defined is not a problem.
See #9812, #9810."""
t = Time("2019-12-20", out_subfmt="date_??")
assert t.mjd == 58837.0
assert t.yday == "2019:354:00:00" # Preserves out_subfmt
t2 = t.replicate(format="mjd")
assert t2.out_subfmt == "*" # Changes to default
t2 = t.copy(format="mjd")
assert t2.out_subfmt == "*"
t2 = Time(t, format="mjd")
assert t2.out_subfmt == "*"
t2 = t.copy(format="yday")
assert t2.out_subfmt == "date_??"
assert t2.value == "2019:354:00:00"
t.format = "yday"
assert t.value == "2019:354:00:00"
assert t.out_subfmt == "date_??"
t = Time("2019-12-20", out_subfmt="date")
assert t.mjd == 58837.0
assert t.yday == "2019:354"
@pytest.mark.parametrize("use_fast_parser", ["force", "False"])
def test_format_fractional_string_parsing(use_fast_parser):
"""Test that string like "2022-08-01.123" does not parse as ISO.
See #6476 and the fix."""
with pytest.raises(
ValueError, match=r"Input values did not match the format class iso"
):
with conf.set_temp("use_fast_parser", use_fast_parser):
Time("2022-08-01.123", format="iso")
@pytest.mark.parametrize("fmt_name,fmt_class", TIME_FORMATS.items())
def test_to_value_with_subfmt_for_every_format(fmt_name, fmt_class):
"""From a starting Time value, test that every valid combination of
to_value(format, subfmt) works. See #9812, #9361.
"""
t = Time("2000-01-01")
subfmts = list(subfmt[0] for subfmt in fmt_class.subfmts) + [None, "*"]
for subfmt in subfmts:
t.to_value(fmt_name, subfmt)
@pytest.mark.parametrize("location", [None, (45, 45)])
def test_location_init(location):
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or a list of
Time instances.
"""
tm = Time("J2010", location=location)
# Init from a scalar Time
tm2 = Time(tm)
assert np.all(tm.location == tm2.location)
assert type(tm.location) is type(tm2.location) # noqa: E721
# From a list of Times
tm2 = Time([tm, tm])
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location) # noqa: E721
# Effectively the same as a list of Times, but just to be sure that
    # Table mixin initialization is working as expected.
tm2 = Table([[tm, tm]])["col0"]
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location) # noqa: E721
def test_location_init_fail():
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or a list of
Time instances. Make sure exception is correct.
"""
tm = Time("J2010", location=(45, 45))
tm2 = Time("J2010")
with pytest.raises(
ValueError, match="cannot concatenate times unless all locations"
):
Time([tm, tm2])
def test_linspace():
"""Test `np.linspace` `__array_func__` implementation for scalar and arrays."""
t1 = Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"])
t2 = Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"])
atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()
ts = np.linspace(t1[0], t2[0], 3)
assert ts[0].isclose(Time("2021-01-01 00:00:00"), atol=atol)
assert ts[1].isclose(Time("2021-01-01 00:30:00"), atol=atol)
assert ts[2].isclose(Time("2021-01-01 01:00:00"), atol=atol)
ts = np.linspace(t1, t2[0], 2, endpoint=False)
assert ts.shape == (2, 2)
assert all(
ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2021-01-01 00:30:00", "2021-01-01 12:30:00"]), atol=atol)
)
ts = np.linspace(t1, t2, 7)
assert ts.shape == (7, 2)
assert all(
ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2021-01-01 00:10:00", "2021-03-03 00:00:00"]), atol=atol)
)
assert all(
ts[5].isclose(Time(["2021-01-01 00:50:00", "2021-10-29 00:00:00"]), atol=atol)
)
assert all(
ts[6].isclose(Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"]), atol=atol)
)
def test_linspace_steps():
"""Test `np.linspace` `retstep` option."""
t1 = Time(["2021-01-01 00:00:00", "2021-01-01 12:00:00"])
t2 = Time("2021-01-02 00:00:00")
atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()
ts, st = np.linspace(t1, t2, 7, retstep=True)
assert ts.shape == (7, 2)
assert st.shape == (2,)
assert all(ts[1].isclose(ts[0] + st, atol=atol))
assert all(ts[6].isclose(ts[0] + 6 * st, atol=atol))
assert all(st.isclose(TimeDelta([14400, 7200], format="sec"), atol=atol))
def test_linspace_fmts():
"""Test `np.linspace` `__array_func__` implementation for start/endpoints
from different formats/systems.
"""
t1 = Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"])
t2 = Time(2458850, format="jd")
t3 = Time(1578009600, format="unix")
atol = 2 * np.finfo(float).eps * abs(t1 - Time([t2, t3])).max()
ts = np.linspace(t1, t2, 3)
assert ts.shape == (3, 2)
assert all(
ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-01 18:00:00"]), atol=atol)
)
assert all(
ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-01 12:00:00"]), atol=atol)
)
ts = np.linspace(t1, Time([t2, t3]), 3)
assert ts.shape == (3, 2)
assert all(
ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol)
)
assert all(
ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-02 12:00:00"]), atol=atol)
)
assert all(
ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-03 00:00:00"]), atol=atol)
)
def test_to_string():
dims = [8, 2, 8]
dx = np.arange(np.prod(dims)).reshape(dims)
tm = Time("2020-01-01", out_subfmt="date") + dx * u.day
exp_lines = [
"[[['2020-01-01' '2020-01-02' ... '2020-01-07' '2020-01-08']",
" ['2020-01-09' '2020-01-10' ... '2020-01-15' '2020-01-16']]",
"",
" [['2020-01-17' '2020-01-18' ... '2020-01-23' '2020-01-24']",
" ['2020-01-25' '2020-01-26' ... '2020-01-31' '2020-02-01']]",
"",
" ...",
"",
" [['2020-04-06' '2020-04-07' ... '2020-04-12' '2020-04-13']",
" ['2020-04-14' '2020-04-15' ... '2020-04-20' '2020-04-21']]",
"",
" [['2020-04-22' '2020-04-23' ... '2020-04-28' '2020-04-29']",
" ['2020-04-30' '2020-05-01' ... '2020-05-06' '2020-05-07']]]",
]
exp_str = "\n".join(exp_lines)
with np.printoptions(threshold=100, edgeitems=2, linewidth=75):
out_str = str(tm)
out_repr = repr(tm)
assert out_str == exp_str
exp_repr = f"<Time object: scale='utc' format='iso' value={exp_str}>"
assert out_repr == exp_repr
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import numpy as np
import pytest
import astropy.units as u
from astropy.time import Time, TimeDelta
class TestTimeComparisons:
"""Test Comparisons of Time and TimeDelta classes"""
def setup_method(self):
self.t1 = Time(np.arange(49995, 50005), format="mjd", scale="utc")
self.t2 = Time(np.arange(49000, 51000, 200), format="mjd", scale="utc")
def test_miscompares(self):
"""
If an incompatible object is compared to a Time object, == should
return False and != should return True. All other comparison
operators should raise a TypeError.
"""
t1 = Time("J2000", scale="utc")
for op, op_str in (
(operator.ge, ">="),
(operator.gt, ">"),
(operator.le, "<="),
(operator.lt, "<"),
):
with pytest.raises(TypeError):
op(t1, None)
# Keep == and != as they are specifically meant to test Time.__eq__
# and Time.__ne__
assert (t1 == None) is False # noqa: E711
assert (t1 != None) is True # noqa: E711
def test_time(self):
t1_lt_t2 = self.t1 < self.t2
assert np.all(
t1_lt_t2
== np.array(
[False, False, False, False, False, False, True, True, True, True]
)
)
t1_ge_t2 = self.t1 >= self.t2
assert np.all(t1_ge_t2 != t1_lt_t2)
t1_le_t2 = self.t1 <= self.t2
assert np.all(
t1_le_t2
== np.array(
[False, False, False, False, False, True, True, True, True, True]
)
)
t1_gt_t2 = self.t1 > self.t2
assert np.all(t1_gt_t2 != t1_le_t2)
t1_eq_t2 = self.t1 == self.t2
assert np.all(
t1_eq_t2
== np.array(
[False, False, False, False, False, True, False, False, False, False]
)
)
t1_ne_t2 = self.t1 != self.t2
assert np.all(t1_ne_t2 != t1_eq_t2)
t1_0_gt_t2_0 = self.t1[0] > self.t2[0]
assert t1_0_gt_t2_0 is True
t1_0_gt_t2 = self.t1[0] > self.t2
assert np.all(
t1_0_gt_t2
== np.array(
[True, True, True, True, True, False, False, False, False, False]
)
)
t1_gt_t2_0 = self.t1 > self.t2[0]
assert np.all(
t1_gt_t2_0
== np.array([True, True, True, True, True, True, True, True, True, True])
)
def test_time_boolean(self):
t1_0_gt_t2_0 = self.t1[0] > self.t2[0]
assert t1_0_gt_t2_0 is True
def test_timedelta(self):
dt = self.t2 - self.t1
with pytest.raises(TypeError):
self.t1 > dt
dt_gt_td0 = dt > TimeDelta(0.0, format="sec")
assert np.all(
dt_gt_td0
== np.array(
[False, False, False, False, False, False, True, True, True, True]
)
)
@pytest.mark.parametrize("swap", [True, False])
@pytest.mark.parametrize("time_delta", [True, False])
def test_isclose_time(swap, time_delta):
"""Test functionality of Time.isclose() method.
Run every test with 2 args in original order and swapped, and using
Quantity or TimeDelta for atol (when provided)."""
def isclose_swap(t1, t2, **kwargs):
if swap:
t1, t2 = t2, t1
if "atol" in kwargs and time_delta:
kwargs["atol"] = TimeDelta(kwargs["atol"])
return t1.isclose(t2, **kwargs)
# Start with original demonstration from #8742. In this issue both t2 == t1
# and t3 == t1 give False, but this may change with a newer ERFA.
t1 = Time("2018-07-24T10:41:56.807015240")
t2 = t1 + 0.0 * u.s
t3 = t1 + TimeDelta(0.0 * u.s)
assert isclose_swap(t1, t2)
assert isclose_swap(t1, t3)
t2 = t1 + 1 * u.s
assert isclose_swap(t1, t2, atol=1.5 / 86400 * u.day) # Test different unit
assert not isclose_swap(t1, t2, atol=0.5 / 86400 * u.day)
t2 = t1 + [-1, 0, 2] * u.s
assert np.all(isclose_swap(t1, t2, atol=1.5 * u.s) == [True, True, False])
t2 = t1 + 3 * np.finfo(float).eps * u.day
assert not isclose_swap(t1, t2)
def test_isclose_time_exceptions():
t1 = Time("2020:001")
t2 = t1 + 1 * u.s
match = "'other' argument must support subtraction with Time"
with pytest.raises(TypeError, match=match):
t1.isclose(1.5)
match = (
"'atol' argument must be a Quantity or TimeDelta instance, got float instead"
)
with pytest.raises(TypeError, match=match):
t1.isclose(t2, 1.5)
@pytest.mark.parametrize("swap", [True, False])
@pytest.mark.parametrize("time_delta", [True, False])
@pytest.mark.parametrize("other_quantity", [True, False])
def test_isclose_timedelta(swap, time_delta, other_quantity):
"""Test functionality of TimeDelta.isclose() method.
Run every test with 2 args in original order and swapped, and using
Quantity or TimeDelta for atol (when provided), and using Quantity or
TimeDelta for the other argument."""
def isclose_swap(t1, t2, **kwargs):
if swap:
t1, t2 = t2, t1
if "atol" in kwargs and time_delta:
kwargs["atol"] = TimeDelta(kwargs["atol"])
return t1.isclose(t2, **kwargs)
def isclose_other_quantity(t1, t2, **kwargs):
if other_quantity:
t2 = t2.to(u.day)
if "atol" in kwargs and time_delta:
kwargs["atol"] = TimeDelta(kwargs["atol"])
return t1.isclose(t2, **kwargs)
t1 = TimeDelta(1.0 * u.s)
t2 = t1 + 0.0 * u.s
t3 = t1 + TimeDelta(0.0 * u.s)
assert isclose_swap(t1, t2)
assert isclose_swap(t1, t3)
assert isclose_other_quantity(t1, t2)
assert isclose_other_quantity(t1, t3)
t2 = t1 + 1 * u.s
assert isclose_swap(t1, t2, atol=1.5 / 86400 * u.day)
assert not isclose_swap(t1, t2, atol=0.5 / 86400 * u.day)
assert isclose_other_quantity(t1, t2, atol=1.5 / 86400 * u.day)
assert not isclose_other_quantity(t1, t2, atol=0.5 / 86400 * u.day)
t1 = TimeDelta(0 * u.s)
t2 = t1 + [-1, 0, 2] * u.s
assert np.all(isclose_swap(t1, t2, atol=1.5 * u.s) == [True, True, False])
assert np.all(isclose_other_quantity(t1, t2, atol=1.5 * u.s) == [True, True, False])
# Check with rtol
# 1 * 0.6 + 0.5 = 1.1 --> 1 <= 1.1 --> True
# 0 * 0.6 + 0.5 = 0.5 --> 0 <= 0.5 --> True
# 2 * 0.6 + 0.5 = 1.7 --> 2 <= 1.7 --> False
assert np.all(t1.isclose(t2, atol=0.5 * u.s, rtol=0.6) == [True, True, False])
t2 = t1 + 2 * np.finfo(float).eps * u.day
assert not isclose_swap(t1, t2)
assert not isclose_other_quantity(t1, t2)
def test_isclose_timedelta_exceptions():
t1 = TimeDelta(1 * u.s)
t2 = t1 + 1 * u.s
match = "other' argument must support conversion to days"
with pytest.raises(TypeError, match=match):
t1.isclose(1.5)
match = (
"'atol' argument must be a Quantity or TimeDelta instance, got float instead"
)
with pytest.raises(TypeError, match=match):
t1.isclose(t2, 1.5)
# Licensed under a 3-clause BSD style license. See LICENSE.rst except
# for parts explicitly labelled as being (largely) copies of numpy
# implementations; for those, see licenses/NUMPY_LICENSE.rst.
"""Helpers for overriding numpy functions.
We override numpy functions in `~astropy.units.Quantity.__array_function__`.
In this module, the numpy functions are split in four groups, each of
which has an associated `set` or `dict`:
1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
supports Quantity; we pass on to ndarray.__array_function__.
2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
after converting quantities to arrays with suitable units,
and possibly setting units on the result.
3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
requires a Quantity-specific implementation
4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
For the FUNCTION_HELPERS `dict`, the value is a function that does the
unit conversion. It should take the same arguments as the numpy
function would (though one can use ``*args`` and ``**kwargs``) and
return a tuple of ``args, kwargs, unit, out``, where ``args`` and
``kwargs`` will be passed on to the numpy implementation,
``unit`` is a possible unit of the result (`None` if it should not be
converted to Quantity), and ``out`` is a possible output Quantity passed
in, which will be filled in-place.
For the DISPATCHED_FUNCTIONS `dict`, the value is a function that
implements the numpy functionality for Quantity input. It should
return a tuple of ``result, unit, out``, where ``result`` is generally
a plain array with the result, and ``unit`` and ``out`` are as above.
If unit is `None`, result gets returned directly, so one can also
return a Quantity directly using ``quantity_result, None, None``.
"""
import functools
import operator
import numpy as np
from numpy.lib import recfunctions as rfn
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from astropy.utils import isiterable
from astropy.utils.compat import NUMPY_LT_1_23
# In 1.17, overrides are enabled by default, but it is still possible to
# turn them off using an environment variable. We use getattr since it
# is planned to remove that possibility in later numpy versions.
ARRAY_FUNCTION_ENABLED = getattr(np.core.overrides, "ENABLE_ARRAY_FUNCTION", True)
SUBCLASS_SAFE_FUNCTIONS = set()
"""Functions with implementations supporting subclasses like Quantity."""
FUNCTION_HELPERS = {}
"""Functions with implementations usable with proper unit conversion."""
DISPATCHED_FUNCTIONS = {}
"""Functions for which we provide our own implementation."""
UNSUPPORTED_FUNCTIONS = set()
"""Functions that cannot sensibly be used with quantities."""
SUBCLASS_SAFE_FUNCTIONS |= {
np.shape, np.size, np.ndim,
np.reshape, np.ravel, np.moveaxis, np.rollaxis, np.swapaxes,
np.transpose, np.atleast_1d, np.atleast_2d, np.atleast_3d,
np.expand_dims, np.squeeze, np.broadcast_to, np.broadcast_arrays,
np.flip, np.fliplr, np.flipud, np.rot90,
np.argmin, np.argmax, np.argsort, np.lexsort, np.searchsorted,
np.nonzero, np.argwhere, np.flatnonzero,
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.real, np.imag, np.diagonal, np.diagflat, np.empty_like,
np.compress, np.extract, np.delete, np.trim_zeros, np.roll, np.take,
np.put, np.fill_diagonal, np.tile, np.repeat,
np.split, np.array_split, np.hsplit, np.vsplit, np.dsplit,
np.stack, np.column_stack, np.hstack, np.vstack, np.dstack,
np.amax, np.amin, np.ptp, np.sum, np.cumsum,
np.prod, np.product, np.cumprod, np.cumproduct,
np.round, np.around,
np.fix, np.angle, np.i0, np.clip,
np.isposinf, np.isneginf, np.isreal, np.iscomplex,
np.average, np.mean, np.std, np.var, np.median, np.trace,
np.nanmax, np.nanmin, np.nanargmin, np.nanargmax, np.nanmean,
np.nanmedian, np.nansum, np.nancumsum, np.nanstd, np.nanvar,
np.nanprod, np.nancumprod,
np.einsum_path, np.trapz, np.linspace,
np.sort, np.msort, np.partition, np.meshgrid,
np.common_type, np.result_type, np.can_cast, np.min_scalar_type,
np.iscomplexobj, np.isrealobj,
np.shares_memory, np.may_share_memory,
np.apply_along_axis, np.take_along_axis, np.put_along_axis,
np.linalg.cond, np.linalg.multi_dot,
} # fmt: skip
# Implemented as methods on Quantity:
# np.ediff1d is from setops, but we support it anyway; the others
# currently return NotImplementedError.
# TODO: move latter to UNSUPPORTED? Would raise TypeError instead.
SUBCLASS_SAFE_FUNCTIONS |= {np.ediff1d}
UNSUPPORTED_FUNCTIONS |= {
np.packbits, np.unpackbits, np.unravel_index,
np.ravel_multi_index, np.ix_, np.cov, np.corrcoef,
np.busday_count, np.busday_offset, np.datetime_as_string,
np.is_busday, np.all, np.any, np.sometrue, np.alltrue,
} # fmt: skip
# Could be supported if we had a natural logarithm unit.
UNSUPPORTED_FUNCTIONS |= {np.linalg.slogdet}
TBD_FUNCTIONS = {
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.find_duplicates, rfn.recursive_fill_fields, rfn.require_fields,
rfn.repack_fields, rfn.stack_arrays,
} # fmt: skip
UNSUPPORTED_FUNCTIONS |= TBD_FUNCTIONS
IGNORED_FUNCTIONS = {
# I/O - useless for Quantity, since no way to store the unit.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
# functions taking record arrays (which are deprecated)
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
} # fmt: skip
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
class FunctionAssigner:
def __init__(self, assignments):
self.assignments = assignments
def __call__(self, f=None, helps=None, module=np):
"""Add a helper to a numpy function.
Normally used as a decorator.
If ``helps`` is given, it should be the numpy function helped (or an
iterable of numpy functions helped).
If ``helps`` is not given, it is assumed the function helped is the
numpy function with the same name as the decorated function.
"""
if f is not None:
if helps is None:
helps = getattr(module, f.__name__)
if not isiterable(helps):
helps = (helps,)
for h in helps:
self.assignments[h] = f
return f
elif helps is not None or module is not np:
return functools.partial(self.__call__, helps=helps, module=module)
else: # pragma: no cover
raise ValueError("function_helper requires at least one argument.")
function_helper = FunctionAssigner(FUNCTION_HELPERS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
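# A minimal, purely illustrative sketch of the two helper conventions described
# in the module docstring.  Both functions below are hypothetical and are
# deliberately *not* registered (no @function_helper / @dispatched_function
# decorator), so they do not affect Quantity.__array_function__; the real,
# registered helpers follow.
def _sketch_function_helper(a, *args, **kwargs):
    # FUNCTION_HELPERS convention: return ``args, kwargs, unit, out``.  The
    # Quantity is converted to a plain ndarray for the numpy implementation,
    # and its unit is handed back so the result can be turned into a Quantity.
    return (a.view(np.ndarray),) + args, kwargs, a.unit, None
def _sketch_dispatched_function(a):
    # DISPATCHED_FUNCTIONS convention: do the work here and return
    # ``result, unit, out``; ``result`` is a plain array that the caller
    # converts to a Quantity with ``unit`` (unless ``unit`` is None).
    return np.square(a.value), a.unit ** 2, None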
# fmt: off
@function_helper(
helps={
np.copy, np.asfarray, np.real_if_close, np.sort_complex, np.resize,
np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft,
np.fft.fft2, np.fft.ifft2, np.fft.rfft2, np.fft.irfft2,
np.fft.fftn, np.fft.ifftn, np.fft.rfftn, np.fft.irfftn,
np.fft.hfft, np.fft.ihfft,
np.linalg.eigvals, np.linalg.eigvalsh,
}
)
# fmt: on
def invariant_a_helper(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, a.unit, None
@function_helper(helps={np.tril, np.triu})
def invariant_m_helper(m, *args, **kwargs):
return (m.view(np.ndarray),) + args, kwargs, m.unit, None
@function_helper(helps={np.fft.fftshift, np.fft.ifftshift})
def invariant_x_helper(x, *args, **kwargs):
return (x.view(np.ndarray),) + args, kwargs, x.unit, None
# Note that ones_like does *not* work by default since if one creates an empty
# array with a unit, one cannot just fill it with unity. Indeed, in this
# respect, it is a bit of an odd function for Quantity. On the other hand, it
# matches the idea that a unit is the same as the quantity with that unit and
# value of 1. Also, it used to work without __array_function__.
# zeros_like does work by default for regular quantities, because numpy first
# creates an empty array with the unit and then fills it with 0 (which can have
# any unit), but for structured dtype this fails (0 cannot have an arbitrary
# structured unit), so we include it here too.
@function_helper(helps={np.ones_like, np.zeros_like})
def like_helper(a, *args, **kwargs):
subok = args[2] if len(args) > 2 else kwargs.pop("subok", True)
unit = a.unit if subok else None
return (a.view(np.ndarray),) + args, kwargs, unit, None
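# Usage sketch (assumed behavior, not executed here): with the helper above in
# place, ``np.ones_like(q)`` and ``np.zeros_like(q)`` for ``q = [1., 2.] * u.m``
# are expected to return Quantities in metres filled with 1. and 0., while
# ``subok=False`` drops the unit and gives back a plain ndarray.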
@function_helper
def sinc(x):
from astropy.units.si import radian
try:
x = x.to_value(radian)
except UnitsError:
raise UnitTypeError(
"Can only apply 'sinc' function to quantities with angle units"
)
return (x,), {}, dimensionless_unscaled, None
@dispatched_function
def unwrap(p, discont=None, axis=-1):
from astropy.units.si import radian
if discont is None:
discont = np.pi << radian
p, discont = _as_quantities(p, discont)
result = np.unwrap.__wrapped__(
p.to_value(radian), discont.to_value(radian), axis=axis
)
result = radian.to(p.unit, result)
return result, p.unit, None
@function_helper
def argpartition(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, None, None
@function_helper
def full_like(a, fill_value, *args, **kwargs):
unit = a.unit if kwargs.get("subok", True) else None
return (a.view(np.ndarray), a._to_own_unit(fill_value)) + args, kwargs, unit, None
@function_helper
def putmask(a, mask, values):
from astropy.units import Quantity
if isinstance(a, Quantity):
return (a.view(np.ndarray), mask, a._to_own_unit(values)), {}, a.unit, None
elif isinstance(values, Quantity):
return (a, mask, values.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def place(arr, mask, vals):
from astropy.units import Quantity
if isinstance(arr, Quantity):
return (arr.view(np.ndarray), mask, arr._to_own_unit(vals)), {}, arr.unit, None
elif isinstance(vals, Quantity):
return (arr, mask, vals.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def copyto(dst, src, *args, **kwargs):
from astropy.units import Quantity
if isinstance(dst, Quantity):
return (dst.view(np.ndarray), dst._to_own_unit(src)) + args, kwargs, None, None
elif isinstance(src, Quantity):
return (dst, src.to_value(dimensionless_unscaled)) + args, kwargs, None, None
else:
raise NotImplementedError
@function_helper
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
nan = x._to_own_unit(nan)
if posinf is not None:
posinf = x._to_own_unit(posinf)
if neginf is not None:
neginf = x._to_own_unit(neginf)
return (
(x.view(np.ndarray),),
        dict(copy=copy, nan=nan, posinf=posinf, neginf=neginf),
x.unit,
None,
)
def _as_quantity(a):
"""Convert argument to a Quantity (or raise NotImplementedError)."""
from astropy.units import Quantity
try:
return Quantity(a, copy=False, subok=True)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _as_quantities(*args):
"""Convert arguments to Quantity (or raise NotImplentedError)."""
from astropy.units import Quantity
try:
# Note: this should keep the dtype the same
return tuple(Quantity(a, copy=False, subok=True, dtype=None) for a in args)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _quantities2arrays(*args, unit_from_first=False):
"""Convert to arrays in units of the first argument that has a unit.
    If unit_from_first, take the unit of the first argument regardless of
    whether it actually defined a unit (e.g., dimensionless for plain arrays).
"""
# Turn first argument into a quantity.
q = _as_quantity(args[0])
if len(args) == 1:
return (q.value,), q.unit
# If we care about the unit being explicit, then check whether this
# argument actually had a unit, or was likely inferred.
if not unit_from_first and (
q.unit is q._default_unit and not hasattr(args[0], "unit")
):
        # Here, the argument could still be something like [10*u.one, 11.*u.one],
        # i.e., properly dimensionless. So, we only override with anything
        # that has a unit not equivalent to dimensionless (it is fine to let
        # other dimensionless units pass, even if explicitly given).
for arg in args[1:]:
trial = _as_quantity(arg)
if not trial.unit.is_equivalent(q.unit):
# Use any explicit unit not equivalent to dimensionless.
q = trial
break
# We use the private _to_own_unit method here instead of just
# converting everything to quantity and then do .to_value(qs0.unit)
# as we want to allow arbitrary unit for 0, inf, and nan.
try:
arrays = tuple((q._to_own_unit(arg)) for arg in args)
except TypeError:
raise NotImplementedError
return arrays, q.unit
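# Hedged illustration (doctest-style, not executed; assumes ``import astropy.units
# as u``): the returned values are expressed in the unit of the first argument
# that carries one, and that unit is returned alongside them, e.g.
# ``_quantities2arrays(1. * u.km, 500. * u.m)`` gives values equivalent to
# ``(1.0, 0.5)`` together with ``u.km``.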
def _iterable_helper(*args, out=None, **kwargs):
"""Convert arguments to Quantity, and treat possible 'out'."""
from astropy.units import Quantity
if out is not None:
if isinstance(out, Quantity):
kwargs["out"] = out.view(np.ndarray)
else:
# TODO: for an ndarray output, we could in principle
# try converting all Quantity to dimensionless.
raise NotImplementedError
arrays, unit = _quantities2arrays(*args)
return arrays, kwargs, unit, out
@function_helper
def concatenate(arrays, axis=0, out=None, **kwargs):
# TODO: make this smarter by creating an appropriately shaped
# empty output array and just filling it.
arrays, kwargs, unit, out = _iterable_helper(*arrays, out=out, axis=axis, **kwargs)
return (arrays,), kwargs, unit, out
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
(arrays, list_ndim, result_ndim, final_size) = np.core.shape_base._block_setup(
arrays
)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim
)
# Here, one line of difference!
arrays, unit = _quantities2arrays(*arrays)
# Back to _block_slicing
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags["F_CONTIGUOUS"] for arr in arrays)
C_order = all(arr.flags["C_CONTIGUOUS"] for arr in arrays)
order = "F" if F_order and not C_order else "C"
result = np.empty(shape=shape, dtype=dtype, order=order)
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result, unit, None
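# Hedged sketch of the net effect (assumes ``import astropy.units as u``):
# ``np.block([1. * u.m, 2. * u.cm])`` converts the pieces to the unit of the
# first block before filling the result, so it should give a Quantity in metres
# with values ``[1., 0.02]``.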
@function_helper
def choose(a, choices, out=None, **kwargs):
choices, kwargs, unit, out = _iterable_helper(*choices, out=out, **kwargs)
return (a, choices), kwargs, unit, out
@function_helper
def select(condlist, choicelist, default=0):
choicelist, kwargs, unit, out = _iterable_helper(*choicelist)
if default != 0:
default = (1 * unit)._to_own_unit(default)
return (condlist, choicelist, default), kwargs, unit, out
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
from astropy.units import Quantity
# Copied implementation from numpy.lib.function_base.piecewise,
# taking care of units of function outputs.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0
):
condlist = [condlist]
if any(isinstance(c, Quantity) for c in condlist):
raise NotImplementedError
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
y = np.zeros(x.shape, x.dtype)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
what, unit = _quantities2arrays(*what)
for item, value in zip(where, what):
y[item] = value
return y, unit, None
@function_helper
def append(arr, values, *args, **kwargs):
arrays, unit = _quantities2arrays(arr, values, unit_from_first=True)
return arrays + args, kwargs, unit, None
@function_helper
def insert(arr, obj, values, *args, **kwargs):
from astropy.units import Quantity
if isinstance(obj, Quantity):
raise NotImplementedError
(arr, values), unit = _quantities2arrays(arr, values, unit_from_first=True)
return (arr, obj, values) + args, kwargs, unit, None
@function_helper
def pad(array, pad_width, mode="constant", **kwargs):
# pad dispatches only on array, so that must be a Quantity.
for key in "constant_values", "end_values":
value = kwargs.pop(key, None)
if value is None:
continue
if not isinstance(value, tuple):
value = (value,)
new_value = []
for v in value:
new_value.append(
tuple(array._to_own_unit(_v) for _v in v)
if isinstance(v, tuple)
else array._to_own_unit(v)
)
kwargs[key] = new_value
return (array.view(np.ndarray), pad_width, mode), kwargs, array.unit, None
@function_helper
def where(condition, *args):
from astropy.units import Quantity
if isinstance(condition, Quantity) or len(args) != 2:
raise NotImplementedError
args, unit = _quantities2arrays(*args)
return (condition,) + args, {}, unit, None
@function_helper(helps={np.quantile, np.nanquantile})
def quantile(a, q, *args, _q_unit=dimensionless_unscaled, **kwargs):
if len(args) >= 2:
out = args[1]
args = args[:1] + args[2:]
else:
out = kwargs.pop("out", None)
from astropy.units import Quantity
if isinstance(q, Quantity):
q = q.to_value(_q_unit)
(a,), kwargs, unit, out = _iterable_helper(a, out=out, **kwargs)
return (a, q) + args, kwargs, unit, out
@function_helper(helps={np.percentile, np.nanpercentile})
def percentile(a, q, *args, **kwargs):
from astropy.units import percent
return quantile(a, q, *args, _q_unit=percent, **kwargs)
@function_helper
def count_nonzero(a, *args, **kwargs):
return (a.value,) + args, kwargs, None, None
@function_helper(helps={np.isclose, np.allclose})
def close(a, b, rtol=1e-05, atol=1e-08, *args, **kwargs):
from astropy.units import Quantity
(a, b), unit = _quantities2arrays(a, b, unit_from_first=True)
    # Allow a number without a unit to be interpreted as having the unit.
atol = Quantity(atol, unit).value
return (a, b, rtol, atol) + args, kwargs, None, None
@function_helper
def array_equal(a1, a2):
args, unit = _quantities2arrays(a1, a2)
return args, {}, None, None
@function_helper
def array_equiv(a1, a2):
args, unit = _quantities2arrays(a1, a2)
return args, {}, None, None
@function_helper(helps={np.dot, np.outer})
def dot_like(a, b, out=None):
from astropy.units import Quantity
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
return tuple(x.view(np.ndarray) for x in (a, b, out)), {}, unit, out
else:
return (a.view(np.ndarray), b.view(np.ndarray)), {}, unit, None
@function_helper(
helps={
np.cross,
np.inner,
np.vdot,
np.tensordot,
np.kron,
np.correlate,
np.convolve,
}
)
def cross_like(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
return (a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs, unit, None
@function_helper
def einsum(subscripts, *operands, out=None, **kwargs):
from astropy.units import Quantity
if not isinstance(subscripts, str):
raise ValueError('only "subscripts" string mode supported for einsum.')
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
else:
kwargs["out"] = out.view(np.ndarray)
qs = _as_quantities(*operands)
unit = functools.reduce(operator.mul, (q.unit for q in qs), dimensionless_unscaled)
arrays = tuple(q.view(np.ndarray) for q in qs)
return (subscripts,) + arrays, kwargs, unit, out
@function_helper
def bincount(x, weights=None, minlength=0):
from astropy.units import Quantity
if isinstance(x, Quantity):
raise NotImplementedError
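    # This point is only reached when the dispatch was triggered by ``weights``
    # being a Quantity (a Quantity ``x`` is rejected above), so accessing
    # ``weights.value`` and ``weights.unit`` is safe here.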
return (x, weights.value, minlength), {}, weights.unit, None
@function_helper
def digitize(x, bins, *args, **kwargs):
arrays, unit = _quantities2arrays(x, bins, unit_from_first=True)
return arrays + args, kwargs, None, None
def _check_bins(bins, unit):
from astropy.units import Quantity
check = _as_quantity(bins)
if check.ndim > 0:
return check.to_value(unit)
elif isinstance(bins, Quantity):
# bins should be an integer (or at least definitely not a Quantity).
raise NotImplementedError
else:
return bins
@function_helper
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if density:
unit = (unit or 1) / a.unit
return (
(a.value, bins, range),
{"weights": weights, "density": density},
(unit, a.unit),
None,
)
@function_helper(helps=np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
# weights is currently unused
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
return (a.value, bins, range, weights), {}, a.unit, None
@function_helper
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
from astropy.units import Quantity
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
x, y = _as_quantities(x, y)
try:
n = len(bins)
except TypeError:
# bins should be an integer (or at least definitely not a Quantity).
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if n == 1:
raise NotImplementedError
elif n == 2 and not isinstance(bins, Quantity):
bins = [_check_bins(b, unit) for (b, unit) in zip(bins, (x.unit, y.unit))]
else:
bins = _check_bins(bins, x.unit)
y = y.to(x.unit)
if density:
unit = (unit or 1) / x.unit / y.unit
return (
(x.value, y.value, bins, range),
{"weights": weights, "density": density},
(unit, x.unit, y.unit),
None,
)
@function_helper
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
try:
# Sample is an ND-array.
_, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = _as_quantities(*sample)
sample_units = [s.unit for s in sample]
sample = [s.value for s in sample]
D = len(sample)
else:
sample = _as_quantity(sample)
sample_units = [sample.unit] * D
try:
M = len(bins)
except TypeError:
# bins should be an integer
from astropy.units import Quantity
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if M != D:
raise ValueError(
"The dimension of bins must be equal to the dimension of the sample x."
)
bins = [_check_bins(b, unit) for (b, unit) in zip(bins, sample_units)]
if density:
unit = functools.reduce(operator.truediv, sample_units, (unit or 1))
return (
(sample, bins, range),
{"weights": weights, "density": density},
(unit, sample_units),
None,
)
@function_helper
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = _as_quantity(a)
if prepend is not np._NoValue:
prepend = _as_quantity(prepend).to_value(a.unit)
if append is not np._NoValue:
append = _as_quantity(append).to_value(a.unit)
return (a.value, n, axis, prepend, append), {}, a.unit, None
@function_helper
def gradient(f, *varargs, **kwargs):
f = _as_quantity(f)
axis = kwargs.get("axis", None)
if axis is None:
n_axis = f.ndim
elif isinstance(axis, tuple):
n_axis = len(axis)
else:
n_axis = 1
if varargs:
varargs = _as_quantities(*varargs)
if len(varargs) == 1 and n_axis > 1:
varargs = varargs * n_axis
if varargs:
units = [f.unit / q.unit for q in varargs]
varargs = tuple(q.value for q in varargs)
else:
units = [f.unit] * n_axis
if len(units) == 1:
units = units[0]
return (f.value,) + varargs, kwargs, units, None
@function_helper
def logspace(start, stop, *args, **kwargs):
from astropy.units import LogQuantity, dex
if not isinstance(start, LogQuantity) or not isinstance(stop, LogQuantity):
raise NotImplementedError
# Get unit from end point as for linspace.
stop = stop.to(dex(stop.unit.physical_unit))
start = start.to(stop.unit)
unit = stop.unit.physical_unit
return (start.value, stop.value) + args, kwargs, unit, None
@function_helper
def geomspace(start, stop, *args, **kwargs):
# Get unit from end point as for linspace.
(stop, start), unit = _quantities2arrays(stop, start)
return (start, stop) + args, kwargs, unit, None
@function_helper
def interp(x, xp, fp, *args, **kwargs):
from astropy.units import Quantity
(x, xp), _ = _quantities2arrays(x, xp)
if isinstance(fp, Quantity):
unit = fp.unit
fp = fp.value
else:
unit = None
return (x, xp, fp) + args, kwargs, unit, None
@function_helper
def unique(
ar, return_index=False, return_inverse=False, return_counts=False, axis=None
):
unit = ar.unit
n_index = sum(bool(i) for i in (return_index, return_inverse, return_counts))
if n_index:
unit = [unit] + n_index * [None]
return (ar.value, return_index, return_inverse, return_counts, axis), {}, unit, None
@function_helper
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
if return_indices:
unit = [unit, None, None]
return (ar1, ar2, assume_unique, return_indices), {}, unit, None
@function_helper(helps=(np.setxor1d, np.union1d, np.setdiff1d))
def twosetop(ar1, ar2, *args, **kwargs):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
return (ar1, ar2) + args, kwargs, unit, None
@function_helper(helps=(np.isin, np.in1d))
def setcheckop(ar1, ar2, *args, **kwargs):
# This tests whether ar1 is in ar2, so we should change the unit of
    # ar1 to that of ar2.
(ar2, ar1), unit = _quantities2arrays(ar2, ar1)
return (ar1, ar2) + args, kwargs, None, None
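# Hedged example of the unit handling above (assumes ``import astropy.units as
# u``): ``np.isin(1. * u.km, [500., 1000.] * u.m)`` converts the first argument
# to metres before the comparison, so it should evaluate to ``True``.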
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is a Quantity.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError(
"function is not returning an array of the correct shape"
)
    # The returned unit is None, to signal that nothing should happen to
    # the output.
return val, None, None
@dispatched_function
def array_repr(arr, *args, **kwargs):
# TODO: The addition of "unit='...'" doesn't worry about line
# length. Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
cls_name = arr.__class__.__name__
fake_name = "_" * len(cls_name)
fake_cls = type(fake_name, (np.ndarray,), {})
no_unit = np.array_repr(arr.view(fake_cls), *args, **kwargs).replace(
fake_name, cls_name
)
unit_part = f"unit='{arr.unit}'"
pre, dtype, post = no_unit.rpartition("dtype")
if dtype:
return f"{pre}{unit_part}, {dtype}{post}", None, None
else:
return f"{no_unit[:-1]}, {unit_part})", None, None
@dispatched_function
def array_str(arr, *args, **kwargs):
# TODO: The addition of the unit doesn't worry about line length.
# Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
no_unit = np.array_str(arr.value, *args, **kwargs)
return no_unit + arr._unitstr, None, None
@function_helper
def array2string(a, *args, **kwargs):
# array2string breaks on quantities as it tries to turn individual
# items into float, which works only for dimensionless. Since the
# defaults would not keep any unit anyway, this is rather pointless -
# we're better off just passing on the array view. However, one can
# also work around this by passing on a formatter (as is done in Angle).
# So, we do nothing if the formatter argument is present and has the
# relevant formatter for our dtype.
formatter = args[6] if len(args) >= 7 else kwargs.get("formatter", None)
if formatter is None:
a = a.value
else:
# See whether it covers our dtype.
from numpy.core.arrayprint import _get_format_function
with np.printoptions(formatter=formatter) as options:
try:
ff = _get_format_function(a.value, **options)
except Exception:
# Shouldn't happen, but possibly we're just not being smart
# enough, so let's pass things on as is.
pass
else:
# If the selected format function is that of numpy, we know
# things will fail
if "numpy" in ff.__module__:
a = a.value
return (a,) + args, kwargs, None, None
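# Hedged note on the behaviour above: without a user-supplied formatter,
# ``np.array2string(np.arange(3.) * u.m)`` is effectively applied to the bare
# values, so the returned string contains the numbers only, without the unit
# (assumes ``import astropy.units as u``).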
@function_helper
def diag(v, *args, **kwargs):
# Function works for *getting* the diagonal, but not *setting*.
# So, override always.
return (v.value,) + args, kwargs, v.unit, None
@function_helper(module=np.linalg)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
unit = a.unit
if compute_uv:
unit = (None, unit, None)
return ((a.view(np.ndarray), full_matrices, compute_uv, hermitian), {}, unit, None)
def _interpret_tol(tol, unit):
from astropy.units import Quantity
return Quantity(tol, unit).value
@function_helper(module=np.linalg)
def matrix_rank(M, tol=None, *args, **kwargs):
if tol is not None:
tol = _interpret_tol(tol, M.unit)
return (M.view(np.ndarray), tol) + args, kwargs, None, None
@function_helper(helps={np.linalg.inv, np.linalg.tensorinv})
def inv(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def pinv(a, rcond=1e-15, *args, **kwargs):
rcond = _interpret_tol(rcond, a.unit)
return (a.view(np.ndarray), rcond) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def det(a):
return (a.view(np.ndarray),), {}, a.unit ** a.shape[-1], None
@function_helper(helps={np.linalg.solve, np.linalg.tensorsolve})
def solve(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
return (
(a.view(np.ndarray), b.view(np.ndarray)) + args,
kwargs,
b.unit / a.unit,
None,
)
@function_helper(module=np.linalg)
def lstsq(a, b, rcond="warn"):
a, b = _as_quantities(a, b)
if rcond not in (None, "warn", -1):
rcond = _interpret_tol(rcond, a.unit)
return (
(a.view(np.ndarray), b.view(np.ndarray), rcond),
{},
(b.unit / a.unit, b.unit**2, None, a.unit),
None,
)
@function_helper(module=np.linalg)
def norm(x, ord=None, *args, **kwargs):
if ord == 0:
from astropy.units import dimensionless_unscaled
unit = dimensionless_unscaled
else:
unit = x.unit
return (x.view(np.ndarray), ord) + args, kwargs, unit, None
@function_helper(module=np.linalg)
def matrix_power(a, n):
return (a.value, n), {}, a.unit**n, None
@function_helper(module=np.linalg)
def cholesky(a):
return (a.value,), {}, a.unit**0.5, None
@function_helper(module=np.linalg)
def qr(a, mode="reduced"):
if mode.startswith("e"):
units = None
elif mode == "r":
units = a.unit
else:
from astropy.units import dimensionless_unscaled
units = (dimensionless_unscaled, a.unit)
return (a.value, mode), {}, units, None
@function_helper(helps={np.linalg.eig, np.linalg.eigh})
def eig(a, *args, **kwargs):
from astropy.units import dimensionless_unscaled
return (a.value,) + args, kwargs, (a.unit, dimensionless_unscaled), None
# ======================= np.lib.recfunctions =======================
@function_helper(module=np.lib.recfunctions)
def structured_to_unstructured(arr, *args, **kwargs):
"""
Convert a structured quantity to an unstructured one.
This only works if all the units are compatible.
"""
from astropy.units import StructuredUnit
target_unit = arr.unit.values()[0]
def replace_unit(x):
if isinstance(x, StructuredUnit):
return x._recursively_apply(replace_unit)
else:
return target_unit
to_unit = arr.unit._recursively_apply(replace_unit)
return (arr.to_value(to_unit),) + args, kwargs, target_unit, None
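# Hedged sketch (not executed): for a structured Quantity whose fields carry,
# say, km and m, the helper above converts every field to the unit of the first
# field (km, taken from ``arr.unit.values()[0]``) before unstructuring, so the
# resulting plain array is homogeneous in that unit.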
def _build_structured_unit(dtype, unit):
"""Build structured unit from dtype
Parameters
----------
dtype : `numpy.dtype`
unit : `astropy.units.Unit`
Returns
-------
`astropy.units.Unit` or tuple
"""
if dtype.fields is None:
return unit
return tuple(_build_structured_unit(v[0], unit) for v in dtype.fields.values())
@function_helper(module=np.lib.recfunctions)
def unstructured_to_structured(arr, dtype, *args, **kwargs):
from astropy.units import StructuredUnit
target_unit = StructuredUnit(_build_structured_unit(dtype, arr.unit))
return (arr.to_value(arr.unit), dtype) + args, kwargs, target_unit, None
def _izip_units_flat(iterable):
"""Returns an iterator of collapsing any nested unit structure.
Parameters
----------
iterable : Iterable[StructuredUnit | Unit] or StructuredUnit
A structured unit or iterable thereof.
Yields
------
unit
"""
from astropy.units import StructuredUnit
# Make Structured unit (pass-through if it is already).
units = StructuredUnit(iterable)
# Yield from structured unit.
for v in units.values():
if isinstance(v, StructuredUnit):
yield from _izip_units_flat(v)
else:
yield v
@function_helper(helps=rfn.merge_arrays)
def merge_arrays(
seqarrays,
fill_value=-1,
flatten=False,
usemask=False,
asrecarray=False,
):
"""Merge structured Quantities field by field.
Like :func:`numpy.lib.recfunctions.merge_arrays`. Note that ``usemask`` and
``asrecarray`` are not supported at this time and will raise a ValueError if
not `False`.
"""
from astropy.units import Quantity, StructuredUnit
if asrecarray:
# TODO? implement if Quantity ever supports rec.array
raise ValueError("asrecarray=True is not supported.")
if usemask:
# TODO: use MaskedQuantity for this case
raise ValueError("usemask=True is not supported.")
# Do we have a single Quantity as input?
if isinstance(seqarrays, Quantity):
seqarrays = (seqarrays,)
# Note: this also converts ndarray -> Quantity[dimensionless]
seqarrays = _as_quantities(*seqarrays)
arrays = tuple(q.value for q in seqarrays)
units = tuple(q.unit for q in seqarrays)
if flatten:
unit = StructuredUnit(tuple(_izip_units_flat(units)))
elif len(arrays) == 1:
unit = StructuredUnit(units[0])
else:
unit = StructuredUnit(units)
return (
(arrays,),
dict(
fill_value=fill_value,
flatten=flatten,
usemask=usemask,
asrecarray=asrecarray,
),
unit,
None,
)
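# Hedged usage sketch for merge_arrays (doctest-style, not executed; assumes
# ``import astropy.units as u`` and ``from numpy.lib import recfunctions as rfn``):
# merging two plain Quantities such as ``rfn.merge_arrays((1. * u.m, 2. * u.s))``
# should yield a structured Quantity whose StructuredUnit combines the input
# units field by field, i.e. ``(m, s)``.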
|
811f3645dd3a6fa46bf183467787a351f82a815c5d251410c36d8d7ab7c4d9d6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Separate tests specifically for equivalencies."""
import numpy as np
# THIRD-PARTY
import pytest
from numpy.testing import assert_allclose
# LOCAL
from astropy import constants
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.units.equivalencies import Equivalency
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_dimensionless_angles():
    # test that the dimensionless_angles equivalency allows one to change
    # by any power of radian in the unit (#1161)
rad1 = u.dimensionless_angles()
assert u.radian.to(1, equivalencies=rad1) == 1.0
assert u.deg.to(1, equivalencies=rad1) == u.deg.to(u.rad)
assert u.steradian.to(1, equivalencies=rad1) == 1.0
assert u.dimensionless_unscaled.to(u.steradian, equivalencies=rad1) == 1.0
# now quantities
assert (1.0 * u.radian).to_value(1, equivalencies=rad1) == 1.0
assert (1.0 * u.deg).to_value(1, equivalencies=rad1) == u.deg.to(u.rad)
assert (1.0 * u.steradian).to_value(1, equivalencies=rad1) == 1.0
# more complicated example
I = 1.0e45 * u.g * u.cm**2 # noqa: E741
Omega = u.cycle / (1.0 * u.s)
Erot = 0.5 * I * Omega**2
# check that equivalency makes this work
Erot_in_erg1 = Erot.to(u.erg, equivalencies=rad1)
# and check that value is correct
assert_allclose(Erot_in_erg1.value, (Erot / u.radian**2).to_value(u.erg))
# test built-in equivalency in subclass
class MyRad1(u.Quantity):
_equivalencies = rad1
phase = MyRad1(1.0, u.cycle)
assert phase.to_value(1) == u.cycle.to(u.radian)
@pytest.mark.parametrize("log_unit", (u.mag, u.dex, u.dB))
def test_logarithmic(log_unit):
# check conversion of mag, dB, and dex to dimensionless and vice versa
with pytest.raises(u.UnitsError):
log_unit.to(1, 0.0)
with pytest.raises(u.UnitsError):
u.dimensionless_unscaled.to(log_unit)
assert log_unit.to(1, 0.0, equivalencies=u.logarithmic()) == 1.0
assert u.dimensionless_unscaled.to(log_unit, equivalencies=u.logarithmic()) == 0.0
# also try with quantities
q_dex = np.array([0.0, -1.0, 1.0, 2.0]) * u.dex
q_expected = 10.0**q_dex.value * u.dimensionless_unscaled
q_log_unit = q_dex.to(log_unit)
assert np.all(q_log_unit.to(1, equivalencies=u.logarithmic()) == q_expected)
assert np.all(q_expected.to(log_unit, equivalencies=u.logarithmic()) == q_log_unit)
with u.set_enabled_equivalencies(u.logarithmic()):
assert np.all(np.abs(q_log_unit - q_expected.to(log_unit)) < 1.0e-10 * log_unit)
doppler_functions = [u.doppler_optical, u.doppler_radio, u.doppler_relativistic]
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_frequency_0(function):
rest = 105.01 * u.GHz
velo0 = rest.to(u.km / u.s, equivalencies=function(rest))
assert velo0.value == 0
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_wavelength_0(function):
rest = 105.01 * u.GHz
q1 = 0.00285489437196 * u.m
velo0 = q1.to(u.km / u.s, equivalencies=function(rest))
np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_energy_0(function):
rest = 105.01 * u.GHz
q1 = 0.0004342864648539744 * u.eV
velo0 = q1.to(u.km / u.s, equivalencies=function(rest))
np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_frequency_circle(function):
rest = 105.01 * u.GHz
shifted = 105.03 * u.GHz
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
freq = velo.to(u.GHz, equivalencies=function(rest))
np.testing.assert_almost_equal(freq.value, shifted.value, decimal=7)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_wavelength_circle(function):
rest = 105.01 * u.nm
shifted = 105.03 * u.nm
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
wav = velo.to(u.nm, equivalencies=function(rest))
np.testing.assert_almost_equal(wav.value, shifted.value, decimal=7)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_energy_circle(function):
rest = 1.0501 * u.eV
shifted = 1.0503 * u.eV
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
en = velo.to(u.eV, equivalencies=function(rest))
np.testing.assert_almost_equal(en.value, shifted.value, decimal=7)
values_ghz = (999.899940784289, 999.8999307714406, 999.8999357778647)
@pytest.mark.parametrize(
("function", "value"), list(zip(doppler_functions, values_ghz))
)
def test_30kms(function, value):
rest = 1000 * u.GHz
velo = 30 * u.km / u.s
shifted = velo.to(u.GHz, equivalencies=function(rest))
np.testing.assert_almost_equal(shifted.value, value, decimal=7)
bad_values = (5, 5 * u.Jy, None)
@pytest.mark.parametrize(
("function", "value"), list(zip(doppler_functions, bad_values))
)
def test_bad_restfreqs(function, value):
with pytest.raises(u.UnitsError):
function(value)
@pytest.mark.parametrize(
("z", "rv_ans"),
[
(0, 0 * (u.km / u.s)),
(0.001, 299642.56184583 * (u.m / u.s)),
(-1, -2.99792458e8 * (u.m / u.s)),
],
)
def test_doppler_redshift(z, rv_ans):
z_in = z * u.dimensionless_unscaled
rv_out = z_in.to(u.km / u.s, u.doppler_redshift())
z_out = rv_out.to(u.dimensionless_unscaled, u.doppler_redshift())
assert_quantity_allclose(rv_out, rv_ans)
assert_quantity_allclose(z_out, z_in) # Check roundtrip
def test_doppler_redshift_no_cosmology():
from astropy.cosmology.units import redshift
with pytest.raises(u.UnitConversionError, match="not convertible"):
(0 * (u.km / u.s)).to(redshift, u.doppler_redshift())
def test_massenergy():
# The relative tolerance of these tests is set by the uncertainties
# in the charge of the electron, which is known to about
# 3e-9 (relative tolerance). Therefore, we limit the
# precision of the tests to 1e-7 to be safe. The masses are
# (loosely) known to ~ 5e-8 rel tolerance, so we couldn't test to
# 1e-7 if we used the values from astropy.constants; that is,
# they might change by more than 1e-7 in some future update, so instead
# they are hardwired here.
# Electron, proton, neutron, muon, 1g
mass_eV = u.Quantity(
[510.998928e3, 938.272046e6, 939.565378e6, 105.6583715e6, 5.60958884539e32],
u.eV,
)
mass_g = u.Quantity(
[9.10938291e-28, 1.672621777e-24, 1.674927351e-24, 1.88353147e-25, 1], u.g
)
# Test both ways
assert np.allclose(
mass_eV.to_value(u.g, equivalencies=u.mass_energy()), mass_g.value, rtol=1e-7
)
assert np.allclose(
mass_g.to_value(u.eV, equivalencies=u.mass_energy()), mass_eV.value, rtol=1e-7
)
# Basic tests of 'derived' equivalencies
# Surface density
sdens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**2)
sdens_g = u.Quantity(1e-4, u.g / u.cm**2)
assert np.allclose(
sdens_eV.to_value(u.g / u.cm**2, equivalencies=u.mass_energy()),
sdens_g.value,
rtol=1e-7,
)
assert np.allclose(
sdens_g.to_value(u.eV / u.m**2, equivalencies=u.mass_energy()),
sdens_eV.value,
rtol=1e-7,
)
# Density
dens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**3)
dens_g = u.Quantity(1e-6, u.g / u.cm**3)
assert np.allclose(
dens_eV.to_value(u.g / u.cm**3, equivalencies=u.mass_energy()),
dens_g.value,
rtol=1e-7,
)
assert np.allclose(
dens_g.to_value(u.eV / u.m**3, equivalencies=u.mass_energy()),
dens_eV.value,
rtol=1e-7,
)
# Power
pow_eV = u.Quantity(5.60958884539e32, u.eV / u.s)
pow_g = u.Quantity(1, u.g / u.s)
assert np.allclose(
pow_eV.to_value(u.g / u.s, equivalencies=u.mass_energy()),
pow_g.value,
rtol=1e-7,
)
assert np.allclose(
pow_g.to_value(u.eV / u.s, equivalencies=u.mass_energy()),
pow_eV.value,
rtol=1e-7,
)
def test_is_equivalent():
assert u.m.is_equivalent(u.pc)
assert u.cycle.is_equivalent(u.mas)
assert not u.cycle.is_equivalent(u.dimensionless_unscaled)
assert u.cycle.is_equivalent(u.dimensionless_unscaled, u.dimensionless_angles())
assert not (u.Hz.is_equivalent(u.J))
assert u.Hz.is_equivalent(u.J, u.spectral())
assert u.J.is_equivalent(u.Hz, u.spectral())
assert u.pc.is_equivalent(u.arcsecond, u.parallax())
assert u.arcminute.is_equivalent(u.au, u.parallax())
# Pass a tuple for multiple possibilities
assert u.cm.is_equivalent((u.m, u.s, u.kg))
assert u.ms.is_equivalent((u.m, u.s, u.kg))
assert u.g.is_equivalent((u.m, u.s, u.kg))
assert not u.L.is_equivalent((u.m, u.s, u.kg))
assert not (u.km / u.s).is_equivalent((u.m, u.s, u.kg))
def test_parallax():
a = u.arcsecond.to(u.pc, 10, u.parallax())
assert_allclose(a, 0.10, rtol=1.0e-12)
b = u.pc.to(u.arcsecond, a, u.parallax())
assert_allclose(b, 10, rtol=1.0e-12)
a = u.arcminute.to(u.au, 1, u.parallax())
assert_allclose(a, 3437.746770785, rtol=1.0e-12)
b = u.au.to(u.arcminute, a, u.parallax())
assert_allclose(b, 1, rtol=1.0e-12)
val = (-1 * u.mas).to(u.pc, u.parallax())
assert np.isnan(val.value)
val = (-1 * u.mas).to_value(u.pc, u.parallax())
assert np.isnan(val)
def test_parallax2():
a = u.arcsecond.to(u.pc, [0.1, 2.5], u.parallax())
assert_allclose(a, [10, 0.4], rtol=1.0e-12)
def test_spectral():
a = u.AA.to(u.Hz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e18)
b = u.Hz.to(u.AA, a, u.spectral())
assert_allclose(b, 1)
a = u.AA.to(u.MHz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e12)
b = u.MHz.to(u.AA, a, u.spectral())
assert_allclose(b, 1)
a = u.m.to(u.Hz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e8)
b = u.Hz.to(u.m, a, u.spectral())
assert_allclose(b, 1)
def test_spectral2():
a = u.nm.to(u.J, 500, u.spectral())
assert_allclose(a, 3.972891366538605e-19)
b = u.J.to(u.nm, a, u.spectral())
assert_allclose(b, 500)
a = u.AA.to(u.Hz, 1, u.spectral())
b = u.Hz.to(u.J, a, u.spectral())
c = u.AA.to(u.J, 1, u.spectral())
assert_allclose(b, c)
c = u.J.to(u.Hz, b, u.spectral())
assert_allclose(a, c)
def test_spectral3():
a = u.nm.to(u.Hz, [1000, 2000], u.spectral())
assert_allclose(a, [2.99792458e14, 1.49896229e14])
@pytest.mark.parametrize(
("in_val", "in_unit"),
[
([0.1, 5000.0, 10000.0], u.AA),
([1e5, 2.0, 1.0], u.micron**-1),
([2.99792458e19, 5.99584916e14, 2.99792458e14], u.Hz),
([1.98644568e-14, 3.97289137e-19, 1.98644568e-19], u.J),
],
)
def test_spectral4(in_val, in_unit):
"""Wave number conversion w.r.t. wavelength, freq, and energy."""
# Spectroscopic and angular
out_units = [u.micron**-1, u.radian / u.micron]
answers = [[1e5, 2.0, 1.0], [6.28318531e05, 12.5663706, 6.28318531]]
for out_unit, ans in zip(out_units, answers):
# Forward
a = in_unit.to(out_unit, in_val, u.spectral())
assert_allclose(a, ans)
# Backward
b = out_unit.to(in_unit, ans, u.spectral())
assert_allclose(b, in_val)
@pytest.mark.parametrize(
"wav", (3500 * u.AA, 8.5654988e14 * u.Hz, 1 / (3500 * u.AA), 5.67555959e-19 * u.J)
)
def test_spectraldensity2(wav):
# flux density
flambda = u.erg / u.angstrom / u.cm**2 / u.s
fnu = u.erg / u.Hz / u.cm**2 / u.s
a = flambda.to(fnu, 1, u.spectral_density(wav))
assert_allclose(a, 4.086160166177361e-12)
# integrated flux
f_int = u.erg / u.cm**2 / u.s
phot_int = u.ph / u.cm**2 / u.s
a = f_int.to(phot_int, 1, u.spectral_density(wav))
assert_allclose(a, 1.7619408e11)
a = phot_int.to(f_int, 1, u.spectral_density(wav))
assert_allclose(a, 5.67555959e-12)
# luminosity density
llambda = u.erg / u.angstrom / u.s
lnu = u.erg / u.Hz / u.s
a = llambda.to(lnu, 1, u.spectral_density(wav))
assert_allclose(a, 4.086160166177361e-12)
a = lnu.to(llambda, 1, u.spectral_density(wav))
assert_allclose(a, 2.44728537142857e11)
def test_spectraldensity3():
# Define F_nu in Jy
f_nu = u.Jy
# Define F_lambda in ergs / cm^2 / s / micron
f_lambda = u.erg / u.cm**2 / u.s / u.micron
# 1 GHz
one_ghz = u.Quantity(1, u.GHz)
# Convert to ergs / cm^2 / s / Hz
assert_allclose(f_nu.to(u.erg / u.cm**2 / u.s / u.Hz, 1.0), 1.0e-23, 10)
# Convert to ergs / cm^2 / s at 10 Ghz
assert_allclose(
f_nu.to(
u.erg / u.cm**2 / u.s, 1.0, equivalencies=u.spectral_density(one_ghz * 10)
),
1.0e-13,
)
# Convert to F_lambda at 1 Ghz
assert_allclose(
f_nu.to(f_lambda, 1.0, equivalencies=u.spectral_density(one_ghz)),
3.335640951981521e-20,
)
# Convert to Jy at 1 Ghz
assert_allclose(
f_lambda.to(u.Jy, 1.0, equivalencies=u.spectral_density(one_ghz)),
1.0 / 3.335640951981521e-20,
)
# Convert to ergs / cm^2 / s at 10 microns
assert_allclose(
f_lambda.to(
u.erg / u.cm**2 / u.s,
1.0,
equivalencies=u.spectral_density(u.Quantity(10, u.micron)),
),
10.0,
)
def test_spectraldensity4():
"""PHOTLAM and PHOTNU conversions."""
flam = u.erg / (u.cm**2 * u.s * u.AA)
fnu = u.erg / (u.cm**2 * u.s * u.Hz)
photlam = u.photon / (u.cm**2 * u.s * u.AA)
photnu = u.photon / (u.cm**2 * u.s * u.Hz)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
flux_photlam = [9.7654e-3, 1.003896e-2, 9.78473e-3]
flux_photnu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
flux_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
flux_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
flux_jy = [3.20735792e-2, 3.29903646e-2, 3.21727226e-2]
flux_stmag = [12.41858665, 12.38919182, 12.41764379]
flux_abmag = [12.63463143, 12.60403221, 12.63128047]
# PHOTLAM <--> FLAM
assert_allclose(
photlam.to(flam, flux_photlam, u.spectral_density(wave)), flux_flam, rtol=1e-6
)
assert_allclose(
flam.to(photlam, flux_flam, u.spectral_density(wave)), flux_photlam, rtol=1e-6
)
# PHOTLAM <--> FNU
assert_allclose(
photlam.to(fnu, flux_photlam, u.spectral_density(wave)), flux_fnu, rtol=1e-6
)
assert_allclose(
fnu.to(photlam, flux_fnu, u.spectral_density(wave)), flux_photlam, rtol=1e-6
)
# PHOTLAM <--> Jy
assert_allclose(
photlam.to(u.Jy, flux_photlam, u.spectral_density(wave)), flux_jy, rtol=1e-6
)
assert_allclose(
u.Jy.to(photlam, flux_jy, u.spectral_density(wave)), flux_photlam, rtol=1e-6
)
# PHOTLAM <--> PHOTNU
assert_allclose(
photlam.to(photnu, flux_photlam, u.spectral_density(wave)),
flux_photnu,
rtol=1e-6,
)
assert_allclose(
photnu.to(photlam, flux_photnu, u.spectral_density(wave)),
flux_photlam,
rtol=1e-6,
)
# PHOTNU <--> FNU
assert_allclose(
photnu.to(fnu, flux_photnu, u.spectral_density(wave)), flux_fnu, rtol=1e-6
)
assert_allclose(
fnu.to(photnu, flux_fnu, u.spectral_density(wave)), flux_photnu, rtol=1e-6
)
# PHOTNU <--> FLAM
assert_allclose(
photnu.to(flam, flux_photnu, u.spectral_density(wave)), flux_flam, rtol=1e-6
)
assert_allclose(
flam.to(photnu, flux_flam, u.spectral_density(wave)), flux_photnu, rtol=1e-6
)
# PHOTLAM <--> STMAG
assert_allclose(
photlam.to(u.STmag, flux_photlam, u.spectral_density(wave)),
flux_stmag,
rtol=1e-6,
)
assert_allclose(
u.STmag.to(photlam, flux_stmag, u.spectral_density(wave)),
flux_photlam,
rtol=1e-6,
)
# PHOTLAM <--> ABMAG
assert_allclose(
photlam.to(u.ABmag, flux_photlam, u.spectral_density(wave)),
flux_abmag,
rtol=1e-6,
)
assert_allclose(
u.ABmag.to(photlam, flux_abmag, u.spectral_density(wave)),
flux_photlam,
rtol=1e-6,
)
def test_spectraldensity5():
"""Test photon luminosity density conversions."""
L_la = u.erg / (u.s * u.AA)
L_nu = u.erg / (u.s * u.Hz)
phot_L_la = u.photon / (u.s * u.AA)
phot_L_nu = u.photon / (u.s * u.Hz)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
flux_phot_L_la = [9.7654e-3, 1.003896e-2, 9.78473e-3]
flux_phot_L_nu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
flux_L_la = [3.9135e-14, 4.0209e-14, 3.9169e-14]
flux_L_nu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
# PHOTLAM <--> FLAM
assert_allclose(
phot_L_la.to(L_la, flux_phot_L_la, u.spectral_density(wave)),
flux_L_la,
rtol=1e-6,
)
assert_allclose(
L_la.to(phot_L_la, flux_L_la, u.spectral_density(wave)),
flux_phot_L_la,
rtol=1e-6,
)
# PHOTLAM <--> FNU
assert_allclose(
phot_L_la.to(L_nu, flux_phot_L_la, u.spectral_density(wave)),
flux_L_nu,
rtol=1e-6,
)
assert_allclose(
L_nu.to(phot_L_la, flux_L_nu, u.spectral_density(wave)),
flux_phot_L_la,
rtol=1e-6,
)
# PHOTLAM <--> PHOTNU
assert_allclose(
phot_L_la.to(phot_L_nu, flux_phot_L_la, u.spectral_density(wave)),
flux_phot_L_nu,
rtol=1e-6,
)
assert_allclose(
phot_L_nu.to(phot_L_la, flux_phot_L_nu, u.spectral_density(wave)),
flux_phot_L_la,
rtol=1e-6,
)
# PHOTNU <--> FNU
assert_allclose(
phot_L_nu.to(L_nu, flux_phot_L_nu, u.spectral_density(wave)),
flux_L_nu,
rtol=1e-6,
)
assert_allclose(
L_nu.to(phot_L_nu, flux_L_nu, u.spectral_density(wave)),
flux_phot_L_nu,
rtol=1e-6,
)
# PHOTNU <--> FLAM
assert_allclose(
phot_L_nu.to(L_la, flux_phot_L_nu, u.spectral_density(wave)),
flux_L_la,
rtol=1e-6,
)
assert_allclose(
L_la.to(phot_L_nu, flux_L_la, u.spectral_density(wave)),
flux_phot_L_nu,
rtol=1e-6,
)
def test_spectraldensity6():
"""Test surface brightness conversions."""
slam = u.erg / (u.cm**2 * u.s * u.AA * u.sr)
snu = u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
sb_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
sb_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
# S(nu) <--> S(lambda)
assert_allclose(snu.to(slam, sb_fnu, u.spectral_density(wave)), sb_flam, rtol=1e-6)
assert_allclose(slam.to(snu, sb_flam, u.spectral_density(wave)), sb_fnu, rtol=1e-6)
@pytest.mark.parametrize(
("from_unit", "to_unit"),
[
(u.ph / u.cm**2 / u.s, (u.cm * u.cm * u.s) ** -1),
(u.ph / u.cm**2 / u.s, u.erg / (u.cm * u.cm * u.s * u.keV)),
(u.erg / u.cm**2 / u.s, (u.cm * u.cm * u.s) ** -1),
(u.erg / u.cm**2 / u.s, u.erg / (u.cm * u.cm * u.s * u.keV)),
],
)
def test_spectraldensity_not_allowed(from_unit, to_unit):
"""Not allowed to succeed as
per https://github.com/astropy/astropy/pull/10015
"""
with pytest.raises(u.UnitConversionError, match="not convertible"):
from_unit.to(to_unit, 1, u.spectral_density(1 * u.AA))
# The other way
with pytest.raises(u.UnitConversionError, match="not convertible"):
to_unit.to(from_unit, 1, u.spectral_density(1 * u.AA))
def test_equivalent_units():
from astropy.units import imperial
with u.add_enabled_units(imperial):
units = u.g.find_equivalent_units()
units_set = set(units)
match = {
u.M_e, u.M_p, u.g, u.kg, u.solMass, u.t, u.u, u.M_earth,
u.M_jup, imperial.oz, imperial.lb, imperial.st, imperial.ton,
imperial.slug,
} # fmt: skip
assert units_set == match
r = repr(units)
assert r.count("\n") == len(units) + 2
def test_equivalent_units2():
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = {
u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr, u.lsec,
u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
u.jupiterRad,
} # fmt: skip
assert units == match
from astropy.units import imperial
with u.add_enabled_units(imperial):
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = {
u.AU, u.Angstrom, imperial.BTU, u.Hz, u.J, u.Ry,
imperial.cal, u.cm, u.eV, u.erg, imperial.ft, imperial.fur,
imperial.inch, imperial.kcal, u.lyr, u.m, imperial.mi, u.lsec,
imperial.mil, u.micron, u.pc, u.solRad, imperial.yd, u.Bq, u.Ci,
imperial.nmi, u.k, u.earthRad, u.jupiterRad,
} # fmt: skip
assert units == match
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = {
u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr, u.lsec,
u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
u.jupiterRad,
} # fmt: skip
assert units == match
def test_trivial_equivalency():
assert u.m.to(u.kg, equivalencies=[(u.m, u.kg)]) == 1.0
def test_invalid_equivalency():
with pytest.raises(ValueError):
u.m.to(u.kg, equivalencies=[(u.m,)])
with pytest.raises(ValueError):
u.m.to(u.kg, equivalencies=[(u.m, 5.0)])
def test_irrelevant_equivalency():
with pytest.raises(u.UnitsError):
u.m.to(u.kg, equivalencies=[(u.m, u.l)])
def test_brightness_temperature():
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052587837212582 * u.K
np.testing.assert_almost_equal(
tb.value,
(1 * u.Jy).to_value(
u.K, equivalencies=u.brightness_temperature(nu, beam_area=omega_B)
),
)
np.testing.assert_almost_equal(
1.0,
tb.to_value(
u.Jy, equivalencies=u.brightness_temperature(nu, beam_area=omega_B)
),
)
def test_swapped_args_brightness_temperature():
"""
#5173 changes the order of arguments but accepts the old (deprecated) args
"""
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052587837212582 * u.K
with pytest.warns(AstropyDeprecationWarning) as w:
result = (1 * u.Jy).to(u.K, equivalencies=u.brightness_temperature(omega_B, nu))
roundtrip = result.to(u.Jy, equivalencies=u.brightness_temperature(omega_B, nu))
assert len(w) == 2
np.testing.assert_almost_equal(tb.value, result.value)
np.testing.assert_almost_equal(roundtrip.value, 1)
def test_surfacebrightness():
sb = 50 * u.MJy / u.sr
k = sb.to(u.K, u.brightness_temperature(50 * u.GHz))
np.testing.assert_almost_equal(k.value, 0.650965, 5)
assert k.unit.is_equivalent(u.K)
def test_beam():
    # pick a beam area: 2 pi r^2 = area of a Gaussian with sigma=50 arcsec
omega_B = 2 * np.pi * (50 * u.arcsec) ** 2
new_beam = (5 * u.beam).to(u.sr, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(omega_B.to(u.sr).value * 5, new_beam.value)
assert new_beam.unit.is_equivalent(u.sr)
# make sure that it's still consistent with 5 beams
nbeams = new_beam.to(u.beam, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(nbeams.value, 5)
# test inverse beam equivalency
# (this is just a sanity check that the equivalency is defined;
# it's not for testing numerical consistency)
(5 / u.beam).to(1 / u.sr, u.equivalencies.beam_angular_area(omega_B))
# test practical case
# (this is by far the most important one)
flux_density = (5 * u.Jy / u.beam).to(
u.MJy / u.sr, u.equivalencies.beam_angular_area(omega_B)
)
np.testing.assert_almost_equal(flux_density.value, 13.5425483146382)
def test_thermodynamic_temperature():
nu = 143 * u.GHz
tb = 0.0026320501262630277 * u.K
eq = u.thermodynamic_temperature(nu, T_cmb=2.7255 * u.K)
np.testing.assert_almost_equal(
tb.value, (1 * (u.MJy / u.sr)).to_value(u.K, equivalencies=eq)
)
np.testing.assert_almost_equal(1.0, tb.to_value(u.MJy / u.sr, equivalencies=eq))
def test_equivalency_context():
with u.set_enabled_equivalencies(u.dimensionless_angles()):
phase = u.Quantity(1.0, u.cycle)
assert_allclose(np.exp(1j * phase), 1.0)
Omega = u.cycle / (1.0 * u.minute)
assert_allclose(np.exp(1j * Omega * 60.0 * u.second), 1.0)
# ensure we can turn off equivalencies even within the scope
with pytest.raises(u.UnitsError):
phase.to(1, equivalencies=None)
# test the manager also works in the Quantity constructor.
q1 = u.Quantity(phase, u.dimensionless_unscaled)
assert_allclose(q1.value, u.cycle.to(u.radian))
# and also if we use a class that happens to have a unit attribute.
class MyQuantityLookalike(np.ndarray):
pass
mylookalike = np.array(1.0).view(MyQuantityLookalike)
mylookalike.unit = "cycle"
# test the manager also works in the Quantity constructor.
q2 = u.Quantity(mylookalike, u.dimensionless_unscaled)
assert_allclose(q2.value, u.cycle.to(u.radian))
with u.set_enabled_equivalencies(u.spectral()):
u.GHz.to(u.cm)
eq_on = u.GHz.find_equivalent_units()
with pytest.raises(u.UnitsError):
u.GHz.to(u.cm, equivalencies=None)
# without equivalencies, we should find a smaller (sub)set
eq_off = u.GHz.find_equivalent_units()
assert all(eq in set(eq_on) for eq in eq_off)
assert set(eq_off) < set(eq_on)
# Check the equivalency manager also works in ufunc evaluations,
# not just using (wrong) scaling. [#2496]
l2v = u.doppler_optical(6000 * u.angstrom)
l1 = 6010 * u.angstrom
assert l1.to(u.km / u.s, equivalencies=l2v) > 100.0 * u.km / u.s
with u.set_enabled_equivalencies(l2v):
assert l1 > 100.0 * u.km / u.s
assert abs((l1 - 500.0 * u.km / u.s).to(u.angstrom)) < 1.0 * u.km / u.s
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
def just_to_from_units(equivalencies):
return [(equiv[0], equiv[1]) for equiv in equivalencies]
tf_dimensionless_angles = just_to_from_units(u.dimensionless_angles())
tf_spectral = just_to_from_units(u.spectral())
# <=1 b/c might have the dimensionless_redshift equivalency enabled.
assert len(base_registry.equivalencies) <= 1
with u.set_enabled_equivalencies(u.dimensionless_angles()):
new_registry = u.get_current_unit_registry()
assert set(just_to_from_units(new_registry.equivalencies)) == set(
tf_dimensionless_angles
)
assert set(new_registry.all_units) == set(base_registry.all_units)
with u.set_enabled_equivalencies(u.spectral()):
newer_registry = u.get_current_unit_registry()
assert set(just_to_from_units(newer_registry.equivalencies)) == set(
tf_spectral
)
assert set(newer_registry.all_units) == set(base_registry.all_units)
assert set(just_to_from_units(new_registry.equivalencies)) == set(
tf_dimensionless_angles
)
assert set(new_registry.all_units) == set(base_registry.all_units)
with u.add_enabled_equivalencies(u.spectral()):
newer_registry = u.get_current_unit_registry()
assert set(just_to_from_units(newer_registry.equivalencies)) == set(
tf_dimensionless_angles
) | set(tf_spectral)
assert set(newer_registry.all_units) == set(base_registry.all_units)
assert base_registry is u.get_current_unit_registry()
def test_temperature():
from astropy.units.imperial import deg_F, deg_R
t_k = 0 * u.K
assert_allclose(t_k.to_value(u.deg_C, u.temperature()), -273.15)
assert_allclose(t_k.to_value(deg_F, u.temperature()), -459.67)
t_k = 20 * u.K
assert_allclose(t_k.to_value(deg_R, u.temperature()), 36.0)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(u.K, u.temperature()), 11.11, atol=0.01)
t_k = 20 * deg_F
assert_allclose(t_k.to_value(deg_R, u.temperature()), 479.67)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(deg_F, u.temperature()), -439.67)
t_k = 20 * u.deg_C
assert_allclose(t_k.to_value(deg_R, u.temperature()), 527.67)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(u.deg_C, u.temperature()), -262.039, atol=0.01)
def test_temperature_energy():
x = 1000 * u.K
y = (x * constants.k_B).to(u.keV)
assert_allclose(x.to_value(u.keV, u.temperature_energy()), y.value)
assert_allclose(y.to_value(u.K, u.temperature_energy()), x.value)
def test_molar_mass_amu():
x = 1 * (u.g / u.mol)
y = 1 * u.u
assert_allclose(x.to_value(u.u, u.molar_mass_amu()), y.value)
assert_allclose(y.to_value(u.g / u.mol, u.molar_mass_amu()), x.value)
with pytest.raises(u.UnitsError):
x.to(u.u)
def test_compose_equivalencies():
x = u.Unit("arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
assert x[0] == u.pc
x = u.Unit("2 arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
assert x[0] == u.Unit(0.5 * u.pc)
x = u.degree.compose(equivalencies=u.dimensionless_angles())
assert u.Unit(u.degree.to(u.radian)) in x
x = (u.nm).compose(
units=(u.m, u.s), equivalencies=u.doppler_optical(0.55 * u.micron)
)
for y in x:
if y.bases == [u.m, u.s]:
assert y.powers == [1, -1]
assert_allclose(
y.scale,
u.nm.to(u.m / u.s, equivalencies=u.doppler_optical(0.55 * u.micron)),
)
break
else:
assert False, "Didn't find speed in compose results"
def test_pixel_scale():
pix = 75 * u.pix
asec = 30 * u.arcsec
pixscale = 0.4 * u.arcsec / u.pix
pixscale2 = 2.5 * u.pix / u.arcsec
assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale)), asec)
assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale)), asec)
assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale2)), asec)
assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale2)), asec)
assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale2)), pix)
def test_pixel_scale_invalid_scale_unit():
pixscale = 0.4 * u.arcsec
pixscale2 = 0.4 * u.arcsec / u.pix**2
with pytest.raises(u.UnitsError, match="pixel dimension"):
u.pixel_scale(pixscale)
with pytest.raises(u.UnitsError, match="pixel dimension"):
u.pixel_scale(pixscale2)
def test_pixel_scale_acceptable_scale_unit():
pix = 75 * u.pix
v = 3000 * (u.cm / u.s)
pixscale = 0.4 * (u.m / u.s / u.pix)
pixscale2 = 2.5 * (u.pix / (u.m / u.s))
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale2)), pix)
def test_plate_scale():
mm = 1.5 * u.mm
asec = 30 * u.arcsec
platescale = 20 * u.arcsec / u.mm
platescale2 = 0.05 * u.mm / u.arcsec
assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale)), asec)
assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale)), asec)
assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale2)), asec)
assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale2)), asec)
assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale)), mm)
assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale2)), mm)
def test_equivalency():
ps = u.pixel_scale(10 * u.arcsec / u.pix)
assert isinstance(ps, Equivalency)
assert isinstance(ps.name, list)
assert len(ps.name) == 1
assert ps.name[0] == "pixel_scale"
assert isinstance(ps.kwargs, list)
assert len(ps.kwargs) == 1
assert ps.kwargs[0] == dict({"pixscale": 10 * u.arcsec / u.pix})
def test_add_equivalencies():
e1 = u.pixel_scale(10 * u.arcsec / u.pixel) + u.temperature_energy()
assert isinstance(e1, Equivalency)
assert e1.name == ["pixel_scale", "temperature_energy"]
assert isinstance(e1.kwargs, list)
assert e1.kwargs == [dict({"pixscale": 10 * u.arcsec / u.pix}), dict()]
e2 = u.pixel_scale(10 * u.arcsec / u.pixel) + [1, 2, 3]
assert isinstance(e2, list)
def test_pprint():
pprint_class = u.UnitBase.EquivalentUnitsList
equiv_units_to_Hz = u.Hz.find_equivalent_units()
assert pprint_class.__repr__(equiv_units_to_Hz).splitlines() == [
" Primary name | Unit definition | Aliases ",
"[",
" Bq | 1 / s | becquerel ,",
" Ci | 3.7e+10 / s | curie ,",
" Hz | 1 / s | Hertz, hertz ,",
"]",
]
assert (
pprint_class._repr_html_(equiv_units_to_Hz) == '<table style="width:50%">'
"<tr><th>Primary name</th><th>Unit definition</th>"
"<th>Aliases</th></tr>"
"<tr><td>Bq</td><td>1 / s</td><td>becquerel</td></tr>"
"<tr><td>Ci</td><td>3.7e+10 / s</td><td>curie</td></tr>"
"<tr><td>Hz</td><td>1 / s</td><td>Hertz, hertz</td></tr></table>"
)
|
bc67b4d4e4ac2d8718789a9700b8516297560f01edf22339e5068b01415c369a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Regression tests for the units package."""
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.units import utils
def test_initialisation():
assert u.Unit(u.m) is u.m
ten_meter = u.Unit(10.0 * u.m)
assert ten_meter == u.CompositeUnit(10.0, [u.m], [1])
assert u.Unit(ten_meter) is ten_meter
assert u.Unit(10.0 * ten_meter) == u.CompositeUnit(100.0, [u.m], [1])
foo = u.Unit("foo", (10.0 * ten_meter) ** 2, namespace=locals())
assert foo == u.CompositeUnit(10000.0, [u.m], [2])
assert u.Unit("m") == u.m
assert u.Unit("") == u.dimensionless_unscaled
assert u.one == u.dimensionless_unscaled
assert u.Unit("10 m") == ten_meter
assert u.Unit(10.0) == u.CompositeUnit(10.0, [], [])
assert u.Unit() == u.dimensionless_unscaled
def test_invalid_power():
x = u.m ** Fraction(1, 3)
assert isinstance(x.powers[0], Fraction)
x = u.m ** Fraction(1, 2)
assert isinstance(x.powers[0], float)
# Test the automatic conversion to a fraction
x = u.m ** (1.0 / 3.0)
assert isinstance(x.powers[0], Fraction)
def test_invalid_compare():
assert not (u.m == u.s)
def test_convert():
assert u.h._get_converter(u.s)(1) == 3600
def test_convert_fail():
with pytest.raises(u.UnitsError):
u.cm.to(u.s, 1)
with pytest.raises(u.UnitsError):
(u.cm / u.s).to(u.m, 1)
def test_composite():
assert (u.cm / u.s * u.h)._get_converter(u.m)(1) == 36
assert u.cm * u.cm == u.cm**2
assert u.cm * u.cm * u.cm == u.cm**3
assert u.Hz.to(1000 * u.Hz, 1) == 0.001
def test_str():
assert str(u.cm) == "cm"
def test_repr():
assert repr(u.cm) == 'Unit("cm")'
def test_represents():
assert u.m.represents is u.m
assert u.km.represents.scale == 1000.0
assert u.km.represents.bases == [u.m]
assert u.Ry.scale == 1.0 and u.Ry.bases == [u.Ry]
assert_allclose(u.Ry.represents.scale, 13.605692518464949)
assert u.Ry.represents.bases == [u.eV]
bla = u.def_unit("bla", namespace=locals())
assert bla.represents is bla
blabla = u.def_unit("blabla", 10 * u.hr, namespace=locals())
assert blabla.represents.scale == 10.0
assert blabla.represents.bases == [u.hr]
assert blabla.decompose().scale == 10 * 3600
assert blabla.decompose().bases == [u.s]
def test_units_conversion():
assert_allclose(u.kpc.to(u.Mpc), 0.001)
assert_allclose(u.Mpc.to(u.kpc), 1000)
assert_allclose(u.yr.to(u.Myr), 1.0e-6)
assert_allclose(u.AU.to(u.pc), 4.84813681e-6)
assert_allclose(u.cycle.to(u.rad), 6.283185307179586)
assert_allclose(u.spat.to(u.sr), 12.56637061435917)
def test_units_manipulation():
# Just do some manipulation and check it's happy
(u.kpc * u.yr) ** Fraction(1, 3) / u.Myr
(u.AA * u.erg) ** 9
def test_decompose():
assert u.Ry == u.Ry.decompose()
def test_dimensionless_to_si():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the SI system
"""
testunit = (1.0 * u.kpc) / (1.0 * u.Mpc)
assert testunit.unit.physical_type == "dimensionless"
assert_allclose(testunit.si, 0.001)
def test_dimensionless_to_cgs():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the CGS system
"""
testunit = (1.0 * u.m) / (1.0 * u.km)
assert testunit.unit.physical_type == "dimensionless"
assert_allclose(testunit.cgs, 0.001)
def test_unknown_unit():
with pytest.warns(u.UnitsWarning, match="FOO"):
u.Unit("FOO", parse_strict="warn")
def test_multiple_solidus():
with pytest.warns(
u.UnitsWarning,
match="'m/s/kg' contains multiple slashes, which is discouraged",
):
assert u.Unit("m/s/kg").to_string() == "m / (kg s)"
with pytest.raises(ValueError):
u.Unit("m/s/kg", format="vounit")
# Regression test for #9000: solidi in exponents do not count towards this.
x = u.Unit("kg(3/10) * m(5/2) / s", format="vounit")
assert x.to_string() == "kg(3/10) m(5/2) / s"
def test_unknown_unit3():
unit = u.Unit("FOO", parse_strict="silent")
assert isinstance(unit, u.UnrecognizedUnit)
assert unit.name == "FOO"
unit2 = u.Unit("FOO", parse_strict="silent")
assert unit == unit2
assert unit.is_equivalent(unit2)
unit3 = u.Unit("BAR", parse_strict="silent")
assert unit != unit3
assert not unit.is_equivalent(unit3)
# Also test basic (in)equalities.
assert unit == "FOO"
assert unit != u.m
# next two from gh-7603.
assert unit != None # noqa: E711
assert unit not in (None, u.m)
with pytest.raises(ValueError):
unit._get_converter(unit3)
_ = unit.to_string("latex")
_ = unit2.to_string("cgs")
with pytest.raises(ValueError):
u.Unit("BAR", parse_strict="strict")
with pytest.raises(TypeError):
u.Unit(None)
def test_invalid_scale():
with pytest.raises(TypeError):
["a", "b", "c"] * u.m
def test_cds_power():
unit = u.Unit("10+22/cm2", format="cds", parse_strict="silent")
assert unit.scale == 1e22
def test_register():
foo = u.def_unit("foo", u.m**3, namespace=locals())
assert "foo" in locals()
with u.add_enabled_units(foo):
assert "foo" in u.get_current_unit_registry().registry
assert "foo" not in u.get_current_unit_registry().registry
def test_in_units():
speed_unit = u.cm / u.s
_ = speed_unit.in_units(u.pc / u.hour, 1)
def test_null_unit():
assert (u.m / u.m) == u.Unit(1)
def test_unrecognized_equivalency():
assert u.m.is_equivalent("foo") is False
assert u.m.is_equivalent("pc") is True
def test_convertible_exception():
with pytest.raises(u.UnitsError, match=r"length.+ are not convertible"):
u.AA.to(u.h * u.s**2)
def test_convertible_exception2():
with pytest.raises(u.UnitsError, match=r"length. and .+time.+ are not convertible"):
u.m.to(u.s)
def test_invalid_type():
class A:
pass
with pytest.raises(TypeError):
u.Unit(A())
def test_steradian():
"""
Issue #599
"""
assert u.sr.is_equivalent(u.rad * u.rad)
results = u.sr.compose(units=u.cgs.bases)
assert results[0].bases[0] is u.rad
results = u.sr.compose(units=u.cgs.__dict__)
assert results[0].bases[0] is u.sr
def test_decompose_bases():
"""
From issue #576
"""
from astropy.constants import e
from astropy.units import cgs
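# The elementary charge in ESU/Gaussian units (the statcoulomb) decomposes to
# cm**(3/2) g**(1/2) / s, which is why the fractional powers below are expected.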
d = e.esu.unit.decompose(bases=cgs.bases)
assert d._bases == [u.cm, u.g, u.s]
assert d._powers == [Fraction(3, 2), 0.5, -1]
assert d._scale == 1.0
def test_complex_compose():
complex = u.cd * u.sr * u.Wb
composed = complex.compose()
assert set(composed[0]._bases) == {u.lm, u.Wb}
def test_equiv_compose():
composed = u.m.compose(equivalencies=u.spectral())
assert any([u.Hz] == x.bases for x in composed)
def test_empty_compose():
with pytest.raises(u.UnitsError):
u.m.compose(units=[])
def _unit_as_str(unit):
# This function serves two purposes - it is used to sort the units to
# test alphabetically, and it is also used to allow pytest to show the unit
# in the [] when running the parametrized tests.
return str(unit)
# We use a set to make sure we don't have any duplicates.
COMPOSE_ROUNDTRIP = set()
for val in u.__dict__.values():
if isinstance(val, u.UnitBase) and not isinstance(val, u.PrefixUnit):
COMPOSE_ROUNDTRIP.add(val)
@pytest.mark.parametrize(
"unit", sorted(COMPOSE_ROUNDTRIP, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_roundtrip(unit):
composed_list = unit.decompose().compose()
found = False
for composed in composed_list:
if len(composed.bases):
if composed.bases[0] is unit:
found = True
break
elif len(unit.bases) == 0:
found = True
break
assert found
# We use a set to make sure we don't have any duplicates.
COMPOSE_CGS_TO_SI = set()
for val in u.cgs.__dict__.values():
# Can't decompose Celsius
if (
isinstance(val, u.UnitBase)
and not isinstance(val, u.PrefixUnit)
and val != u.cgs.deg_C
):
COMPOSE_CGS_TO_SI.add(val)
@pytest.mark.parametrize(
"unit", sorted(COMPOSE_CGS_TO_SI, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_cgs_to_si(unit):
si = unit.to_system(u.si)
assert all(x.is_equivalent(unit) for x in si)
assert si[0] == unit.si
# We use a set to make sure we don't have any duplicates.
COMPOSE_SI_TO_CGS = set()
for val in u.si.__dict__.values():
# Can't decompose Celsius
if (
isinstance(val, u.UnitBase)
and not isinstance(val, u.PrefixUnit)
and val != u.si.deg_C
):
COMPOSE_SI_TO_CGS.add(val)
@pytest.mark.parametrize(
"unit", sorted(COMPOSE_SI_TO_CGS, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_si_to_cgs(unit):
# Can't convert things with Ampere to CGS without more context
try:
cgs = unit.to_system(u.cgs)
except u.UnitsError:
if u.A in unit.decompose().bases:
pass
else:
raise
else:
assert [x.is_equivalent(unit) for x in cgs]
assert cgs[0] == unit.cgs
def test_to_si():
"""Check units that are not official derived units.
Should not appear on its own or as part of a composite unit.
"""
# TODO: extend to all units not listed in Tables 1--6 of
# https://physics.nist.gov/cuu/Units/units.html
# See gh-10585.
# This was always the case
assert u.bar.si is not u.bar
# But this used to fail.
assert u.bar not in (u.kg / (u.s**2 * u.sr * u.nm)).si._bases
def test_to_cgs():
assert u.Pa.to_system(u.cgs)[1]._bases[0] is u.Ba
assert u.Pa.to_system(u.cgs)[1]._scale == 10.0
def test_decompose_to_cgs():
from astropy.units import cgs
assert u.m.decompose(bases=cgs.bases)._bases[0] is cgs.cm
def test_compose_issue_579():
unit = u.kg * u.s**2 / u.m
result = unit.compose(units=[u.N, u.s, u.m])
assert len(result) == 1
assert result[0]._bases == [u.s, u.N, u.m]
assert result[0]._powers == [4, 1, -2]
def test_compose_prefix_unit():
x = u.m.compose(units=(u.m,))
assert x[0].bases[0] is u.m
assert x[0].scale == 1.0
x = u.m.compose(units=[u.km], include_prefix_units=True)
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = u.m.compose(units=[u.km])
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = (u.km / u.s).compose(units=(u.pc, u.Myr))
assert x[0].bases == [u.pc, u.Myr]
assert_allclose(x[0].scale, 1.0227121650537077)
with pytest.raises(u.UnitsError):
(u.km / u.s).compose(units=(u.pc, u.Myr), include_prefix_units=False)
def test_self_compose():
unit = u.kg * u.s
assert len(unit.compose(units=[u.g, u.s])) == 1
def test_compose_failed():
unit = u.kg
with pytest.raises(u.UnitsError):
unit.compose(units=[u.N])
def test_compose_fractional_powers():
# Warning: with a complicated unit, this test becomes very slow;
# e.g., x = (u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2)
# takes 3 s
x = u.m**0.5 / u.yr**1.5
factored = x.compose()
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.cgs)
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.si)
for unit in factored:
assert x.decompose() == unit.decompose()
def test_compose_best_unit_first():
results = u.l.compose()
assert len(results[0].bases) == 1
assert results[0].bases[0] is u.l
results = (u.s**-1).compose()
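# Hz and Bq both represent 1 / s, so either may legitimately come out first.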
assert results[0].bases[0] in (u.Hz, u.Bq)
results = (u.Ry.decompose()).compose()
assert results[0].bases[0] is u.Ry
def test_compose_no_duplicates():
new = u.kg / u.s**3 * u.au**2.5 / u.yr**0.5 / u.sr**2
composed = new.compose(units=u.cgs.bases)
assert len(composed) == 1
def test_long_int():
"""
Issue #672
"""
sigma = 10**21 * u.M_p / u.cm**2
sigma.to(u.M_sun / u.pc**2)
def test_endian_independence():
"""
Regression test for #744
A logic issue in the units code meant that big endian arrays could not be
converted because the dtype is '>f4', not 'float32', and the code was
looking for the strings 'float' or 'int'.
"""
for endian in ["<", ">"]:
for ntype in ["i", "f"]:
for byte in ["4", "8"]:
x = np.array([1, 2, 3], dtype=(endian + ntype + byte))
u.m.to(u.cm, x)
def test_radian_base():
"""
Issue #863
"""
assert (1 * u.degree).si.unit == u.rad
def test_no_as():
# We don't define 'as', since it is a keyword, but we
# do want to define the long form (`attosecond`).
assert not hasattr(u, "as")
assert hasattr(u, "attosecond")
def test_no_duplicates_in_names():
# Regression test for #5036
assert u.ct.names == ["ct", "count"]
assert u.ct.short_names == ["ct", "count"]
assert u.ct.long_names == ["count"]
assert set(u.ph.names) == set(u.ph.short_names) | set(u.ph.long_names)
def test_pickling():
p = pickle.dumps(u.m)
other = pickle.loads(p)
assert other is u.m
new_unit = u.IrreducibleUnit(["foo"], format={"baz": "bar"})
# This is local, so the unit should not be registered.
assert "foo" not in u.get_current_unit_registry().registry
# Test pickling of this unregistered unit.
p = pickle.dumps(new_unit)
new_unit_copy = pickle.loads(p)
assert new_unit_copy is not new_unit
assert new_unit_copy.names == ["foo"]
assert new_unit_copy.get_format_name("baz") == "bar"
# It should still not be registered.
assert "foo" not in u.get_current_unit_registry().registry
# Now try the same with a registered unit.
with u.add_enabled_units([new_unit]):
p = pickle.dumps(new_unit)
assert "foo" in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy is new_unit
# Check that a registered unit can be loaded and that it gets re-enabled.
with u.add_enabled_units([]):
assert "foo" not in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy is not new_unit
assert new_unit_copy.names == ["foo"]
assert new_unit_copy.get_format_name("baz") == "bar"
assert "foo" in u.get_current_unit_registry().registry
# And just to be sure, that it gets removed outside of the context.
assert "foo" not in u.get_current_unit_registry().registry
def test_pickle_between_sessions():
"""We cannot really test between sessions easily, so fake it.
This test can be changed if the pickle protocol or the code
changes enough that it no longer works.
"""
hash_m = hash(u.m)
unit = pickle.loads(
b"\x80\x04\x95\xd6\x00\x00\x00\x00\x00\x00\x00\x8c\x12"
b"astropy.units.core\x94\x8c\x1a_recreate_irreducible_unit"
b"\x94\x93\x94h\x00\x8c\x0fIrreducibleUnit\x94\x93\x94]\x94"
b"(\x8c\x01m\x94\x8c\x05meter\x94e\x88\x87\x94R\x94}\x94(\x8c\x06"
b"_names\x94]\x94(h\x06h\x07e\x8c\x0c_short_names"
b"\x94]\x94h\x06a\x8c\x0b_long_names\x94]\x94h\x07a\x8c\x07"
b"_format\x94}\x94\x8c\x07__doc__\x94\x8c "
b"meter: base unit of length in SI\x94ub."
)
assert unit is u.m
assert hash(u.m) == hash_m
@pytest.mark.parametrize(
"unit",
[u.IrreducibleUnit(["foo"], format={"baz": "bar"}), u.Unit("m_per_s", u.m / u.s)],
)
def test_pickle_does_not_keep_memoized_hash(unit):
"""
Tests a private attribute: the problem with _hash being pickled and
restored only appeared when unpickling happened in another session,
where the stored hash was no longer valid, and separate sessions are
difficult to mimic in a simple test. See gh-11872.
"""
unit_hash = hash(unit)
assert unit._hash is not None
unit_copy = pickle.loads(pickle.dumps(unit))
# unit is not registered so we get a copy.
assert unit_copy is not unit
assert unit_copy._hash is None
assert hash(unit_copy) == unit_hash
with u.add_enabled_units([unit]):
# unit is registered, so we get a reference.
unit_ref = pickle.loads(pickle.dumps(unit))
if isinstance(unit, u.IrreducibleUnit):
assert unit_ref is unit
else:
assert unit_ref is not unit
# pickle.load used to override the hash, although in this case
# it would be the same anyway, so not clear this tests much.
assert hash(unit) == unit_hash
def test_pickle_unrecognized_unit():
"""
Issue #2047
"""
a = u.Unit("asdf", parse_strict="silent")
pickle.loads(pickle.dumps(a))
def test_duplicate_define():
with pytest.raises(ValueError):
u.def_unit("m", namespace=u.__dict__)
def test_all_units():
from astropy.units.core import get_current_unit_registry
registry = get_current_unit_registry()
assert len(registry.all_units) > len(registry.non_prefix_units)
def test_repr_latex():
assert u.m._repr_latex_() == u.m.to_string("latex")
def test_operations_with_strings():
assert u.m / "5s" == (u.m / (5.0 * u.s))
assert u.m * "5s" == (5.0 * u.m * u.s)
def test_comparison():
assert u.m > u.cm
assert u.m >= u.cm
assert u.cm < u.m
assert u.cm <= u.m
with pytest.raises(u.UnitsError):
u.m > u.kg
def test_compose_into_arbitrary_units():
# Issue #1438
from astropy.constants import G
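# Simply check that decomposing into this arbitrary set of units does not raise.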
G.decompose([u.kg, u.km, u.Unit("15 s")])
def test_unit_multiplication_with_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = "kg"
assert us * u1 == u.Unit(us) * u1
assert u1 * us == u1 * u.Unit(us)
def test_unit_division_by_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = "kg"
assert us / u1 == u.Unit(us) / u1
assert u1 / us == u1 / u.Unit(us)
def test_sorted_bases():
"""See #1616."""
assert (u.m * u.Jy).bases == (u.Jy * u.m).bases
def test_megabit():
"""See #1543"""
assert u.Mbit is u.Mb
assert u.megabit is u.Mb
assert u.Mbyte is u.MB
assert u.megabyte is u.MB
def test_composite_unit_get_format_name():
"""See #1576"""
unit1 = u.Unit("nrad/s")
unit2 = u.Unit("Hz(1/2)")
assert str(u.CompositeUnit(1, [unit1, unit2], [1, -1])) == "nrad / (Hz(1/2) s)"
def test_unicode_policy():
from astropy.tests.helper import assert_follows_unicode_guidelines
assert_follows_unicode_guidelines(u.degree, roundtrip=u.__dict__)
def test_suggestions():
for search, matches in [
("microns", "micron"),
("s/microns", "micron"),
("M", "m"),
("metre", "meter"),
("angstroms", "Angstrom or angstrom"),
("milimeter", "millimeter"),
("ångström", "Angstrom, angstrom, mAngstrom or mangstrom"),
("kev", "EV, eV, kV or keV"),
]:
with pytest.raises(ValueError, match=f"Did you mean {matches}"):
u.Unit(search)
def test_fits_hst_unit():
"""See #1911."""
with pytest.warns(u.UnitsWarning, match="multiple slashes") as w:
x = u.Unit("erg /s /cm**2 /angstrom")
assert x == u.erg * u.s**-1 * u.cm**-2 * u.angstrom**-1
assert len(w) == 1
def test_barn_prefixes():
"""Regression test for https://github.com/astropy/astropy/issues/3753"""
assert u.fbarn is u.femtobarn
assert u.pbarn is u.picobarn
def test_fractional_powers():
"""See #2069"""
m = 1e9 * u.Msun
tH = 1.0 / (70.0 * u.km / u.s / u.Mpc)
vc = 200 * u.km / u.s
x = (c.G**2 * m**2 * tH.cgs) ** Fraction(1, 3) / vc
v1 = x.to("pc")
x = (c.G**2 * m**2 * tH) ** Fraction(1, 3) / vc
v2 = x.to("pc")
x = (c.G**2 * m**2 * tH.cgs) ** (1.0 / 3.0) / vc
v3 = x.to("pc")
x = (c.G**2 * m**2 * tH) ** (1.0 / 3.0) / vc
v4 = x.to("pc")
assert_allclose(v1, v2)
assert_allclose(v2, v3)
assert_allclose(v3, v4)
x = u.m ** (1.0 / 101.0)
assert isinstance(x.powers[0], float)
x = u.m ** (3.0 / 7.0)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 3
assert x.powers[0].denominator == 7
x = u.cm ** Fraction(1, 2) * u.cm ** Fraction(2, 3)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(7, 6)
# Regression test for #9258.
x = (u.TeV ** (-2.2)) ** (1 / -2.2)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(1, 1)
def test_sqrt_mag():
sqrt_mag = u.mag**0.5
assert hasattr(sqrt_mag.decompose().scale, "imag")
assert (sqrt_mag.decompose()) ** 2 == u.mag
def test_composite_compose():
# Issue #2382
composite_unit = u.s.compose(units=[u.Unit("s")])[0]
u.s.compose(units=[composite_unit])
def test_data_quantities():
assert u.byte.is_equivalent(u.bit)
def test_compare_with_none():
# Ensure that equality comparisons with `None` work, and don't
# raise exceptions. We are deliberately not using `is None` here
# because that doesn't trigger the bug. See #3108.
assert not (u.m == None) # noqa: E711
assert u.m != None # noqa: E711
def test_validate_power_detect_fraction():
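# The float below differs from 7/6 only at the rounding level, so
# validate_power should recognise it and return the exact Fraction.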
frac = utils.validate_power(1.1666666666666665)
assert isinstance(frac, Fraction)
assert frac.numerator == 7
assert frac.denominator == 6
def test_complex_fractional_rounding_errors():
# See #3788
kappa = 0.34 * u.cm**2 / u.g
r_0 = 886221439924.7849 * u.cm
q = 1.75
rho_0 = 5e-10 * u.solMass / u.solRad**3
y = 0.5
beta = 0.19047619047619049
a = 0.47619047619047628
m_h = 1e6 * u.solMass
t1 = 2 * c.c / (kappa * np.sqrt(np.pi))
t2 = (r_0**-q) / (rho_0 * y * beta * (a * c.G * m_h) ** 0.5)
result = (t1 * t2) ** -0.8
assert result.unit.physical_type == "length"
result.to(u.solRad)
def test_fractional_rounding_errors_simple():
x = (u.m**1.5) ** Fraction(4, 5)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 6
assert x.powers[0].denominator == 5
def test_enable_unit_groupings():
from astropy.units import cds
with cds.enable():
assert cds.geoMass in u.kg.find_equivalent_units()
from astropy.units import imperial
with imperial.enable():
assert imperial.inch in u.m.find_equivalent_units()
def test_unit_summary_prefixes():
"""
Test, for a few units, that the unit summary table correctly reports
whether or not the unit supports prefixes.
Regression test for https://github.com/astropy/astropy/issues/3835
"""
from astropy.units import astrophys
for summary in utils._iter_unit_summary(astrophys.__dict__):
unit, _, _, _, prefixes = summary
if unit.name == "lyr":
assert prefixes
elif unit.name == "pc":
assert prefixes
elif unit.name == "barn":
assert prefixes
elif unit.name == "cycle":
assert prefixes == "No"
elif unit.name == "spat":
assert prefixes == "No"
elif unit.name == "vox":
assert prefixes == "Yes"
def test_raise_to_negative_power():
"""Test that order of bases is changed when raising to negative power.
Regression test for https://github.com/astropy/astropy/issues/8260
"""
m2s2 = u.m**2 / u.s**2
spm = m2s2 ** (-1 / 2)
assert spm.bases == [u.s, u.m]
assert spm.powers == [1, -1]
assert spm == u.s / u.m
@pytest.mark.parametrize(
"name, symbol, multiplying_factor",
[
("quetta", "Q", 1e30),
("ronna", "R", 1e27),
("yotta", "Y", 1e24),
("zetta", "Z", 1e21),
("exa", "E", 1e18),
("peta", "P", 1e15),
("tera", "T", 1e12),
("giga", "G", 1e9),
("mega", "M", 1e6),
("kilo", "k", 1e3),
("deca", "da", 1e1),
("deci", "d", 1e-1),
("centi", "c", 1e-2),
("milli", "m", 1e-3),
("micro", "u", 1e-6),
("nano", "n", 1e-9),
("pico", "p", 1e-12),
("femto", "f", 1e-15),
("atto", "a", 1e-18),
("zepto", "z", 1e-21),
("yocto", "y", 1e-24),
("ronto", "r", 1e-27),
("quecto", "q", 1e-30),
],
)
def test_si_prefixes(name, symbol, multiplying_factor):
base = 1 * u.g
quantity_from_symbol = base.to(f"{symbol}g")
quantity_from_name = base.to(f"{name}gram")
assert u.isclose(quantity_from_name, base)
assert u.isclose(quantity_from_symbol, base)
value_ratio = base.value / quantity_from_symbol.value
assert u.isclose(value_ratio, multiplying_factor)
|
549bb55cb9286c6f249e3b1a505e25272d4704107eec37db871540b2350a2989 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import itertools
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED,
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
IGNORED_FUNCTIONS,
SUBCLASS_SAFE_FUNCTIONS,
TBD_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
def get_wrapped_functions(*modules):
wrapped_functions = {}
for mod in modules:
for name, f in mod.__dict__.items():
if f is np.printoptions or name.startswith("_"):
continue
if callable(f) and hasattr(f, "__wrapped__"):
wrapped_functions[name] = f
return wrapped_functions
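# Illustration (an assumption about NumPy internals, not used by the tests):
# dispatched functions expose the underlying implementation via ``__wrapped__``,
# which is what get_wrapped_functions keys on, e.g. on NumPy versions where the
# dispatch wrapping is in place:
#     getattr(np.concatenate, "__wrapped__", None) is not None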
all_wrapped_functions = get_wrapped_functions(
np, np.fft, np.linalg, np.lib.recfunctions
)
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
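# For example, defining a method named ``test_reshape`` on a class that uses
# this metaclass marks ``np.reshape`` as covered.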
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup_method(self):
self.q = np.arange(9.0).reshape(3, 3) / 4.0 * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1.0 * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1.0 * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1.0 * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
self.check(np.broadcast_to, (3, 3, 3), subok=True)
out = np.broadcast_to(self.q, (3, 3, 3))
assert type(out) is np.ndarray # NOT Quantity
def test_broadcast_arrays(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
a1, a2 = np.broadcast_arrays(self.q, q2)
assert type(a1) is np.ndarray
assert type(a2) is np.ndarray
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150.0, 350.0]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@needs_array_function
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices, axis=0) * self.q.unit
assert np.all(out == expected)
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis, self.q.value) * self.q.unit**2
assert_array_equal(out, expected)
@needs_array_function
@pytest.mark.parametrize("axes", ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup_method(self):
self.q = (np.arange(9.0).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@needs_array_function
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@needs_array_function
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.0)
def test_ones_like(self):
self.check(np.ones_like)
@needs_array_function
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
@needs_array_function
def test_diag_1d_input(self):
# Also check 1-D case; drops unit w/o __array_function__.
q = self.q.ravel()
o = np.diag(q)
expected = np.diag(q.value) << q.unit
assert o.unit == self.q.unit
assert o.shape == expected.shape
assert_array_equal(o, expected)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value, axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True], self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.0) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@needs_array_function
def test_putmask(self):
q = np.arange(3.0) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.0)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_place(self):
q = np.arange(3.0) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_copyto(self):
q = np.arange(3.0) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.0).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25.0 * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@needs_array_function
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup_method(self):
self.q1 = np.arange(6.0).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop("q_list", [self.q1, self.q2])
q_ref = kwargs.pop("q_ref", q_list[0])
o = func(q_list, *args, **kwargs)
v_list = [q_ref._to_own_unit(q) for q in q_list]
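# (_to_own_unit is a private Quantity helper; as used here it converts each
# item to q_ref's unit and returns the plain array values.)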
expected = func(v_list, *args, **kwargs) * q_ref.unit
assert o.shape == expected.shape
assert np.all(o == expected)
@needs_array_function
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
# regression test for gh-13322.
self.check(np.concatenate, dtype="f4")
self.check(
np.concatenate,
q_list=[np.zeros(self.q1.shape), self.q1, self.q2],
q_ref=self.q1,
)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = (
np.concatenate([self.q1.value, self.q2.to_value(self.q1.unit)])
* self.q1.unit
)
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@needs_array_function
def test_stack(self):
self.check(np.stack)
@needs_array_function
def test_column_stack(self):
self.check(np.column_stack)
@needs_array_function
def test_hstack(self):
self.check(np.hstack)
@needs_array_function
def test_vstack(self):
self.check(np.vstack)
@needs_array_function
def test_dstack(self):
self.check(np.dstack)
@needs_array_function
def test_block(self):
self.check(np.block)
result = np.block([[0.0, 1.0 * u.m], [1.0 * u.cm, 2.0 * u.km]])
assert np.all(result == np.block([[0, 1.0], [0.01, 2000.0]]) << u.m)
@needs_array_function
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = (
np.append(self.q1.value, self.q2.to_value(self.q1.unit), axis=0)
* self.q1.unit
)
assert np.all(out == expected)
a = np.arange(3.0)
result = np.append(a, 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@needs_array_function
def test_insert(self):
# Unit of inserted values is not ignored.
q = np.arange(12.0).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50.0, 25.0] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) << q.unit
assert np.all(out == expected)
# 0 can have any unit.
out2 = np.insert(q, (3, 5), 0)
expected2 = np.insert(q.value, (3, 5), 0) << q.unit
assert np.all(out2 == expected2)
a = np.arange(3.0)
result = np.insert(a, (2,), 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50.0 * u.cm)
with pytest.raises(u.UnitsError):
np.insert(q, (3, 5), 0.0 * u.s)
@needs_array_function
def test_pad(self):
q = np.arange(1.0, 6.0) * u.m
out = np.pad(q, (2, 3), "constant", constant_values=(0.0, 150.0 * u.cm))
assert out.unit == q.unit
expected = (
np.pad(q.value, (2, 3), "constant", constant_values=(0.0, 1.5)) * q.unit
)
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), "constant", constant_values=150.0 * u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), "constant", constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), "linear_ramp", end_values=(25.0 * u.cm, 0.0))
assert out3.unit == q.unit
expected3 = (
np.pad(q.value, (2, 3), "linear_ramp", end_values=(0.25, 0.0)) * q.unit
)
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.arange(54.0).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(TypeError):
np.any(self.q)
def test_all(self):
with pytest.raises(TypeError):
np.all(self.q)
def test_sometrue(self):
with pytest.raises(TypeError):
np.sometrue(self.q)
def test_alltrue(self):
with pytest.raises(TypeError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
def test_angle(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0.0, 10.0, 20.0]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
unit = self.q.unit
expected = (
np.clip(self.q.value, qmin.to_value(unit), qmax.to_value(unit)) * unit
)
assert np.all(out == expected)
@needs_array_function
def test_sinc(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.0 * u.one)
@needs_array_function
def test_where(self):
out = np.where([True, False, True], self.q, 1.0 * u.km)
expected = np.where([True, False, True], self.q.value, 1000.0) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
# result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@needs_array_function
def test_select(self):
q = self.q
out = np.select(
[q < 0.55 * u.m, q > 1.0 * u.m], [q, q.to(u.cm)], default=-1.0 * u.km
)
expected = (
np.select([q.value < 0.55, q.value > 1], [q.value, q.value], default=-1000)
* u.m
)
assert np.all(out == expected)
@needs_array_function
def test_real_if_close(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_tril(self):
self.check(np.tril)
@needs_array_function
def test_triu(self):
self.check(np.triu)
@needs_array_function
def test_unwrap(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1 * u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1.0, 2.0] * u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.0 * u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@needs_array_function
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q, nan=1.0 * u.km, posinf=2.0 * u.km, neginf=-2 * u.km)
expected = [-2000.0, 2000.0, 1000.0, 3.0, 4.0] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1.0 + 1j] * u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1.0 + 1j] * u.m)
def test_isclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 102.0, 199.0]) * u.cm
atol = 1.5 * u.cm
rtol = 1.0 * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(
q1.value, q2.to_value(q1.unit), atol=atol.to_value(q1.unit)
)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
@needs_array_function
def test_allclose_atol_default_unit(self):
q_cm = self.q.to(u.cm)
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 198.0]) * u.cm
out = np.isclose(q1, q2, atol=0.011, rtol=0)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0.011, rtol=0)
assert np.all(out == expected)
out2 = np.isclose(q2, q1, atol=0.011, rtol=0)
expected2 = np.isclose(q2.value, q1.to_value(q2.unit), atol=0.011, rtol=0)
assert np.all(out2 == expected2)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit**2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
@needs_array_function
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@needs_array_function
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@needs_array_function
def test_count_nonzero(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
# With an axis argument, a plain ndarray of integer counts is returned, not a Quantity.
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
atol = 2 * u.cm
rtol = 1.0 * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0.0, rtol=rtol)
@needs_array_function
def test_allclose_atol_default_unit(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
assert np.allclose(q1, q2, atol=0.011, rtol=0)
assert not np.allclose(q2, q1, atol=0.011, rtol=0)
def test_allclose_failures(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2 * u.s, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1.0 * u.s)
@needs_array_function
def test_array_equal(self):
q1 = np.arange(3.0) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@needs_array_function
def test_array_equiv(self):
q1 = np.array([[0.0, 1.0, 2.0]] * 3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
class TestNanFunctions(InvariantUnitTestSetup):
def setup_method(self):
super().setup_method()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit**2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@needs_array_function
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@needs_array_function
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@needs_array_function
def test_cross(self):
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@needs_array_function
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@needs_array_function
def test_dot(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.dot(q1, q2)
assert o == 32.0 * u.m / u.s
@needs_array_function
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32.0 + 0j) * u.m / u.s
@needs_array_function
def test_tensordot(self):
# From the docstring example
a = np.arange(60.0).reshape(3, 4, 5) * u.m
b = np.arange(24.0).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value, axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@needs_array_function
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_einsum(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum("...i", q1)
assert np.all(o == q1)
o = np.einsum("ii", q1)
expected = np.einsum("ii", q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum("ij,jk", q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum("ij,jk", q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum_path("...i", q1)
assert o[0] == ["einsum_path", (0,)]
o = np.einsum_path("ii", q1)
assert o[0] == ["einsum_path", (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path("ij,jk", q1, q2)
assert o[0] == ["einsum_path", (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.0) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10.0 * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.0) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.0) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_diff_prepend_append(self):
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km)
expected = np.diff(x.value, prepend=-0.125, append=1000.0) * x.unit
assert np.all(out == expected)
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km, n=2)
expected = np.diff(x.value, prepend=-0.125, append=1000.0, n=2) * x.unit
assert np.all(out == expected)
with pytest.raises(TypeError):
np.diff(x, prepend=object())
def test_gradient(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
out = np.gradient(x)
expected = np.gradient(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_gradient_spacing(self):
# Gradient with a spacing argument needs the __array_function__ override.
x = np.arange(10.0) * u.m
spacing = 10.0 * u.s
out = np.gradient(x, spacing)
expected = np.gradient(x.value, spacing.value) * (x.unit / spacing.unit)
assert np.all(out == expected)
f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
dx = 2.0 * u.s
y = [1.0, 1.5, 3.5] * u.GHz
dfdx, dfdy = np.gradient(f, dx, y)
exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
exp_dfdx = exp_dfdx * f.unit / dx.unit
exp_dfdy = exp_dfdy * f.unit / y.unit
assert np.all(dfdx == exp_dfdx)
assert np.all(dfdy == exp_dfdy)
dfdx2 = np.gradient(f, dx, axis=0)
assert np.all(dfdx2 == exp_dfdx)
dfdy2 = np.gradient(f, y, axis=(1,))
assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
def test_linspace(self):
# Note: linspace gets the unit of the end point, which is not super logical.
out = np.linspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.linspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.linspace(q1, q2, 5)
expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
@needs_array_function
def test_logspace(self):
unit = u.m / u.s**2
out = np.logspace(10.0 * u.dex(unit), 20 * u.dex(unit), 10)
expected = np.logspace(10.0, 20.0, 10) * unit
assert np.all(out == expected)
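# ST magnitudes are logarithmic with -2.5 mag per factor of 10, i.e. a base of
# 10**(-0.4), which is why that base is used in the comparison below.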
out = np.logspace(10.0 * u.STmag, 20 * u.STmag, 10)
expected = np.logspace(10.0, 20.0, 10, base=10.0 ** (-0.4)) * u.ST
assert u.allclose(out, expected)
@needs_array_function
def test_geomspace(self):
out = np.geomspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.geomspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(1.0, 7.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.geomspace(q1, q2, 5)
expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
@needs_array_function
def test_interp(self):
x = np.array([1250.0, 2750.0]) * u.m
xp = np.arange(5.0) * u.km
yp = np.arange(5.0) * u.day
out = np.interp(x, xp, yp)
expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
assert np.all(out == expected)
out = np.interp(x, xp, yp.value)
assert type(out) is np.ndarray
assert np.all(out == expected.value)
@needs_array_function
def test_piecewise(self):
x = np.linspace(-2.5, 2.5, 6) * u.m
out = np.piecewise(x, [x < 0, x >= 0], [-1 * u.s, 1 * u.day])
expected = (
np.piecewise(x.value, [x.value < 0, x.value >= 0], [-1, 24 * 3600]) * u.s
)
assert out.unit == expected.unit
assert np.all(out == expected)
out2 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [-1 * u.s, 1 * u.day, lambda x: 1 * u.hour]
)
expected2 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [-1, 24 * 3600, 3600])
* u.s
)
assert out2.unit == expected2.unit
assert np.all(out2 == expected2)
out3 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [0, 1 * u.percent, lambda x: 1 * u.one]
)
expected3 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [0, 0.01, 1]) * u.one
)
assert out3.unit == expected3.unit
assert np.all(out3 == expected3)
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x, [x], [0.0])
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x.value, [x], [0.0])
class TestBincountDigitize(metaclass=CoverageMeta):
@needs_array_function
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
weights = np.arange(len(i)) * u.Jy
out = np.bincount(i, weights)
expected = np.bincount(i, weights.value) * weights.unit
assert_array_equal(out, expected)
with pytest.raises(TypeError):
np.bincount(weights)
@needs_array_function
def test_digitize(self):
x = np.array([1500.0, 2500.0, 4500.0]) * u.m
bins = np.arange(10.0) * u.km
out = np.digitize(x, bins)
expected = np.digitize(x.to_value(bins.unit), bins.value)
assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(
self,
function,
*args,
value_args=None,
value_kwargs=None,
expected_units=None,
**kwargs
):
"""Check quanties are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the argument can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
defaults, i.e., non-quantity arguments do not have to be repeated.
"""
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
# Check bin edges. Here, histogramdd returns an iterable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(
out[bin_slice], expected[bin_slice], expected_units[bin_slice]
):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
@needs_array_function
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(
np.histogram, x, value_args=(x.value,), expected_units=(None, x.unit)
)
# With bins.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
value_args=(x.value, [1.25, 2.0]),
expected_units=(None, x.unit),
)
# With density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
density=True,
value_args=(x.value, [1.25, 2.0]),
expected_units=(1 / x.unit, x.unit),
)
# With weights.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit),
)
# With weights and density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
density=True,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit / x.unit, x.unit),
)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@needs_array_function
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.0]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
@needs_array_function
def test_histogram2d(self):
x, y = self.x, self.y
weights = self.weights
# Basic tests with X, Y.
self.check(
np.histogram2d,
x,
y,
value_args=(x.value, y.value),
expected_units=(None, x.unit, y.unit),
)
# Check units with density.
self.check(
np.histogram2d,
x,
y,
density=True,
value_args=(x.value, y.value),
expected_units=(1 / (x.unit * y.unit), x.unit, y.unit),
)
# Check units with weights.
self.check(
np.histogram2d,
x,
y,
weights=weights,
value_args=(x.value, y.value),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit, y.unit),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogram2d,
x,
y,
[5, inb_y],
value_args=(x.value, y.value, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, x.unit, y.unit),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogram2d,
x.value,
y.value,
bins=[5, inb2_y],
value_args=(x.value, y.value),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, u.one, u.one),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogram2d(x, y, 125 * u.s)
with pytest.raises(TypeError):
np.histogram2d(x.value, y.value, 125 * u.s)
# Bin units need to match units of x, y.
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogram2d(x.value, y.value, [125, 200] * u.s)
@needs_array_function
def test_histogramdd(self):
        # First, replicate the histogram2d tests, but using the histogramdd
        # override. Normally, histogramdd takes the sample as a tuple
# with a given number of dimensions, and returns the histogram
# as well as a tuple of bin edges.
sample = self.x, self.y
sample_units = self.x.unit, self.y.unit
sample_values = (self.x.value, self.y.value)
weights = self.weights
# Basic tests with X, Y
self.check(
np.histogramdd,
sample,
value_args=(sample_values,),
expected_units=(None, sample_units),
)
# Check units with density.
self.check(
np.histogramdd,
sample,
density=True,
value_args=(sample_values,),
expected_units=(1 / (self.x.unit * self.y.unit), sample_units),
)
# Check units with weights.
self.check(
np.histogramdd,
sample,
weights=weights,
value_args=(sample_values,),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, sample_units),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogramdd,
sample,
[5, inb_y],
value_args=(sample_values, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, sample_units),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogramdd,
sample_values,
bins=[5, inb2_y],
value_args=(sample_values,),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, (u.one, u.one)),
)
# For quantities, it is probably not that likely one would pass
# in the sample as an array, but check that it works anyway.
# This also gives a 3-D check.
xyz = np.random.normal(size=(10, 3)) * u.m
self.check(
np.histogramdd,
xyz,
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Passing it in as a tuple should work just as well; note the
# *last* axis contains the sample dimension.
self.check(
np.histogramdd,
(xyz[:, 0], xyz[:, 1], xyz[:, 2]),
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogramdd(sample, 125 * u.s)
# Sequence of single items should be integer.
with pytest.raises(TypeError):
np.histogramdd(sample, [125, 200] * u.s)
with pytest.raises(TypeError):
np.histogramdd(sample_values, [125, 200] * u.s)
# Units of bins should match.
with pytest.raises(u.UnitsError):
np.histogramdd(sample, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@needs_array_function
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@needs_array_function
def test_corrcoef(self):
        # Do not see how we can use corrcoef with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
def test_sort(self):
self.check(np.sort)
def test_sort_axis(self):
self.check(np.sort, axis=0)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
self.check(np.msort)
@needs_array_function
def test_sort_complex(self):
self.check(np.sort_complex)
def test_partition(self):
self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
# For these, making behaviour work means deviating only slightly from
# the docstring, and by default they fail miserably. So, might as well.
def setup_method(self):
self.q = np.arange(3.0) * u.Jy
@needs_array_function
def test_array2string(self):
# The default formatters cannot handle units, so if we do not pass
# a relevant formatter, we are better off just treating it as an
# array (which happens for all subtypes).
out0 = np.array2string(self.q)
expected0 = str(self.q.value)
assert out0 == expected0
# Arguments are interpreted as usual.
out1 = np.array2string(self.q, separator=", ")
expected1 = "[0., 1., 2.]"
assert out1 == expected1
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.q, separator=", ", formatter={"all": str})
expected2 = "[0.0 Jy, 1.0 Jy, 2.0 Jy]"
assert out2 == expected2
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.q, None, None, None, ", ", "", np._NoValue, {"float": str}
)
assert out3 == expected2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.q, separator=", ", formatter={"int": str})
assert out4 == expected1
@needs_array_function
def test_array_repr(self):
out = np.array_repr(self.q)
assert out == "Quantity([0., 1., 2.], unit='Jy')"
q2 = self.q.astype("f4")
out2 = np.array_repr(q2)
assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"
@needs_array_function
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
# Index/bit functions generally fail for floats, so the usual
    # float quantities are safe, but the integer ones are not.
def setup_method(self):
self.q = np.arange(3) * u.m
self.uint_q = u.Quantity(np.arange(3), "m", dtype="u1")
@needs_array_function
def test_packbits(self):
with pytest.raises(TypeError):
np.packbits(self.q)
with pytest.raises(TypeError):
np.packbits(self.uint_q)
@needs_array_function
def test_unpackbits(self):
with pytest.raises(TypeError):
np.unpackbits(self.q)
with pytest.raises(TypeError):
np.unpackbits(self.uint_q)
@needs_array_function
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.q, 3)
with pytest.raises(TypeError):
np.unravel_index(self.uint_q, 3)
@needs_array_function
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.q,), 3)
with pytest.raises(TypeError):
np.ravel_multi_index((self.uint_q,), 3)
@needs_array_function
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.q)
with pytest.raises(TypeError):
np.ix_(self.uint_q)
class TestDtypeFunctions(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
def test_meshgrid(self):
q1 = np.arange(3.0) * u.m
q2 = np.arange(5.0) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
class TestMemoryFunctions(NoUnitTestSetup):
def test_shares_memory(self):
self.check(np.shares_memory, self.q.value)
def test_may_share_memory(self):
self.check(np.may_share_memory, self.q.value)
class TestSetOpsFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([[0.0, 1.0, -1.0], [3.0, 5.0, 3.0], [0.0, 1.0, -1]]) * u.m
self.q2 = np.array([0.0, 100.0, 150.0, 200.0]) * u.cm
def check(self, function, qs, *args, **kwargs):
unit = kwargs.pop("unit", self.q.unit)
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize(
"kwargs",
(
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True),
),
)
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize(
"kwargs",
(
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1),
),
)
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize("kwargs", (dict(), dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
@needs_array_function
def test_setxor1d(self):
self.check2(np.setxor1d)
@needs_array_function
def test_union1d(self):
self.check2(np.union1d)
result = np.union1d(np.array([0.0, np.nan]), np.arange(3) << u.m)
assert result.unit is u.m
assert_array_equal(result.value, np.array([0.0, 1.0, 2.0, np.nan]))
@needs_array_function
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@needs_array_function
def test_in1d(self):
self.check2(np.in1d, unit=None)
# Check zero is treated as having any unit.
assert np.in1d(np.zeros(1), self.q2)
with pytest.raises(u.UnitsError):
np.in1d(np.ones(1), self.q2)
@needs_array_function
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
# ediff1d works always as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.0) * u.m
out = np.ediff1d(x, to_begin=-12.5 * u.cm, to_end=1 * u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.0) * x.unit
assert_array_equal(out, expected)
class TestDatetimeFunctions(BasicTestSetup):
def test_busday_count(self):
with pytest.raises(TypeError):
np.busday_count(self.q, self.q)
def test_busday_offset(self):
with pytest.raises(TypeError):
np.busday_offset(self.q, self.q)
def test_datetime_as_string(self):
with pytest.raises(TypeError):
np.datetime_as_string(self.q)
def test_is_busday(self):
with pytest.raises(TypeError):
np.is_busday(self.q)
# These functions always worked; ensure they do not regress.
# Note that they are *not* wrapped so no need to check coverage.
@pytest.mark.parametrize("function", [np.fft.fftfreq, np.fft.rfftfreq])
def test_fft_frequencies(function):
out = function(128, d=0.1 * u.s)
expected = function(128, d=0.1) / u.s
assert_array_equal(out, expected)
@needs_array_function
class TestFFT(InvariantUnitTestSetup):
# These are all trivial, just preserve the unit.
def setup_method(self):
# Use real input; gets turned into complex as needed.
self.q = np.arange(128.0).reshape(8, -1) * u.s
def test_fft(self):
self.check(np.fft.fft)
def test_ifft(self):
self.check(np.fft.ifft)
def test_rfft(self):
self.check(np.fft.rfft)
def test_irfft(self):
self.check(np.fft.irfft)
def test_fft2(self):
self.check(np.fft.fft2)
def test_ifft2(self):
self.check(np.fft.ifft2)
def test_rfft2(self):
self.check(np.fft.rfft2)
def test_irfft2(self):
self.check(np.fft.irfft2)
def test_fftn(self):
self.check(np.fft.fftn)
def test_ifftn(self):
self.check(np.fft.ifftn)
def test_rfftn(self):
self.check(np.fft.rfftn)
def test_irfftn(self):
self.check(np.fft.irfftn)
def test_hfft(self):
self.check(np.fft.hfft)
def test_ihfft(self):
self.check(np.fft.ihfft)
def test_fftshift(self):
self.check(np.fft.fftshift)
def test_ifftshift(self):
self.check(np.fft.ifftshift)
class TestLinAlg(metaclass=CoverageMeta):
def setup_method(self):
self.q = (
np.array(
[[ 1.0, -1.0, 2.0], # noqa: E201
[ 0.0, 3.0, -1.0], # noqa: E201
[-1.0, -1.0, 1.0]]
) << u.m
) # fmt: skip
def test_cond(self):
c = np.linalg.cond(self.q)
expected = np.linalg.cond(self.q.value)
assert c == expected
def test_matrix_rank(self):
r = np.linalg.matrix_rank(self.q)
x = np.linalg.matrix_rank(self.q.value)
assert r == x
@needs_array_function
def test_matrix_rank_with_tol(self):
# Use a matrix that is not so good, so tol=1 and tol=0.01 differ.
q = np.arange(9.0).reshape(3, 3) / 4 * u.m
tol = 1.0 * u.cm
r2 = np.linalg.matrix_rank(q, tol)
x2 = np.linalg.matrix_rank(q.value, tol.to_value(q.unit))
assert r2 == x2
def test_matrix_power(self):
q1 = np.linalg.matrix_power(self.q, 1)
assert_array_equal(q1, self.q)
q2 = np.linalg.matrix_power(self.q, 2)
assert_array_equal(q2, self.q @ self.q)
q2 = np.linalg.matrix_power(self.q, 4)
assert_array_equal(q2, self.q @ self.q @ self.q @ self.q)
@needs_array_function
def test_matrix_inv_power(self):
qinv = np.linalg.inv(self.q.value) / self.q.unit
qm1 = np.linalg.matrix_power(self.q, -1)
assert_array_equal(qm1, qinv)
qm3 = np.linalg.matrix_power(self.q, -3)
assert_array_equal(qm3, qinv @ qinv @ qinv)
@needs_array_function
def test_multi_dot(self):
q2 = np.linalg.multi_dot([self.q, self.q])
q2x = self.q @ self.q
assert_array_equal(q2, q2x)
q3 = np.linalg.multi_dot([self.q, self.q, self.q])
q3x = self.q @ self.q @ self.q
assert_array_equal(q3, q3x)
@needs_array_function
def test_svd(self):
m = np.arange(10.0) * np.arange(5.0)[:, np.newaxis] * u.m
svd_u, svd_s, svd_vt = np.linalg.svd(m, full_matrices=False)
svd_ux, svd_sx, svd_vtx = np.linalg.svd(m.value, full_matrices=False)
svd_sx <<= m.unit
assert_array_equal(svd_u, svd_ux)
assert_array_equal(svd_vt, svd_vtx)
assert_array_equal(svd_s, svd_sx)
assert u.allclose(svd_u @ np.diag(svd_s) @ svd_vt, m)
s2 = np.linalg.svd(m, compute_uv=False)
svd_s2x = np.linalg.svd(m.value, compute_uv=False) << m.unit
assert_array_equal(s2, svd_s2x)
@needs_array_function
def test_inv(self):
inv = np.linalg.inv(self.q)
expected = np.linalg.inv(self.q.value) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_pinv(self):
pinv = np.linalg.pinv(self.q)
expected = np.linalg.pinv(self.q.value) / self.q.unit
assert_array_equal(pinv, expected)
rcond = 0.01 * u.cm
pinv2 = np.linalg.pinv(self.q, rcond)
expected2 = (
np.linalg.pinv(self.q.value, rcond.to_value(self.q.unit)) / self.q.unit
)
assert_array_equal(pinv2, expected2)
@needs_array_function
def test_tensorinv(self):
inv = np.linalg.tensorinv(self.q, ind=1)
expected = np.linalg.tensorinv(self.q.value, ind=1) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_det(self):
det = np.linalg.det(self.q)
expected = np.linalg.det(self.q.value)
expected <<= self.q.unit ** self.q.shape[-1]
assert_array_equal(det, expected)
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[0]) # Not 2-D
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[:-1]) # Not square.
@needs_array_function
def test_slogdet(self):
# TODO: Could be supported if we had a natural logarithm unit.
with pytest.raises(TypeError):
logdet = np.linalg.slogdet(self.q)
assert hasattr(logdet, "unit")
@needs_array_function
def test_solve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.solve(self.q, b)
xx = np.linalg.solve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_tensorsolve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.tensorsolve(self.q, b)
xx = np.linalg.tensorsolve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_lstsq(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x, residuals, rank, s = np.linalg.lstsq(self.q, b, rcond=None)
xx, residualsx, rankx, sx = np.linalg.lstsq(self.q.value, b.value, rcond=None)
xx <<= b.unit / self.q.unit
residualsx <<= b.unit**2
sx <<= self.q.unit
assert_array_equal(x, xx)
assert_array_equal(residuals, residualsx)
assert_array_equal(s, sx)
assert rank == rankx
assert u.allclose(self.q @ x, b)
# Also do one where we can check the answer...
m = np.eye(3)
b = np.arange(3) * u.m
x, residuals, rank, s = np.linalg.lstsq(m, b, rcond=1.0 * u.percent)
assert_array_equal(x, b)
assert np.all(residuals == 0 * u.m**2)
assert rank == 3
assert_array_equal(s, np.array([1.0, 1.0, 1.0]) << u.one)
with pytest.raises(u.UnitsError):
np.linalg.lstsq(m, b, rcond=1.0 * u.s)
@needs_array_function
def test_norm(self):
n = np.linalg.norm(self.q)
expected = np.linalg.norm(self.q.value) << self.q.unit
assert_array_equal(n, expected)
# Special case: 1-D, ord=0.
n1 = np.linalg.norm(self.q[0], ord=0)
expected1 = np.linalg.norm(self.q[0].value, ord=0) << u.one
assert_array_equal(n1, expected1)
@needs_array_function
def test_cholesky(self):
# Numbers from np.linalg.cholesky docstring.
q = np.array([[1, -2j], [2j, 5]]) * u.m
cd = np.linalg.cholesky(q)
cdx = np.linalg.cholesky(q.value) << q.unit**0.5
assert_array_equal(cd, cdx)
assert u.allclose(cd @ cd.T.conj(), q)
@needs_array_function
def test_qr(self):
# This is not exhaustive...
a = np.array([[1, -2j], [2j, 5]]) * u.m
q, r = np.linalg.qr(a)
qx, rx = np.linalg.qr(a.value)
qx <<= u.one
rx <<= a.unit
assert_array_equal(q, qx)
assert_array_equal(r, rx)
assert u.allclose(q @ r, a)
@needs_array_function
def test_eig(self):
w, v = np.linalg.eig(self.q)
wx, vx = np.linalg.eig(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w, v = np.linalg.eig(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
assert_array_equal(v, np.eye(3))
@needs_array_function
def test_eigvals(self):
w = np.linalg.eigvals(self.q)
wx = np.linalg.eigvals(self.q.value) << self.q.unit
assert_array_equal(w, wx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w = np.linalg.eigvals(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
@needs_array_function
def test_eigh(self):
w, v = np.linalg.eigh(self.q)
wx, vx = np.linalg.eigh(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
@needs_array_function
def test_eigvalsh(self):
w = np.linalg.eigvalsh(self.q)
wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
assert_array_equal(w, wx)
class TestRecFunctions(metaclass=CoverageMeta):
@classmethod
def setup_class(self):
self.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
self.pv_t_dtype = np.dtype(
[("pv", np.dtype([("pp", "f8"), ("vv", "f8")])), ("t", "f8")]
)
self.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], self.pv_dtype)
self.pv_t = np.array(
[((4.0, 2.5), 0.0), ((5.0, 5.0), 1.0), ((6.0, 7.5), 2.0)], self.pv_t_dtype
)
self.pv_unit = u.StructuredUnit((u.km, u.km / u.s), ("p", "v"))
self.pv_t_unit = u.StructuredUnit((self.pv_unit, u.s), ("pv", "t"))
self.q_pv = self.pv << self.pv_unit
self.q_pv_t = self.pv_t << self.pv_t_unit
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'m'"):
rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))
# it works if all the units are equal
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)
# also if the units are convertible
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 600] * u.eV)
struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
with u.add_enabled_equivalencies(u.mass_energy()):
unstruct = rfn.structured_to_unstructured(struct)
        assert u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)
# and if the dtype is nested
struct = [(5, (400.0, 3e6))] * u.Unit("m, (cm, um)")
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [[5, 4, 3]] * u.m)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
unstruct = [1, 2, 3] * u.m
dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])
# It works.
struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
assert struct.unit == u.Unit("(m, m, m)")
assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)
# Can't structure something that's already structured.
with pytest.raises(ValueError, match="arr must have at least one dimension"):
rfn.unstructured_to_structured(struct, dtype=dtype)
        # For the other tests of ``unstructured_to_structured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``
def test_merge_arrays_repeat_dtypes(self):
# Cannot merge things with repeat dtypes.
q1 = u.Quantity([(1,)], dtype=[("f1", float)])
q2 = u.Quantity([(1,)], dtype=[("f1", float)])
with pytest.raises(ValueError, match="field 'f1' occurs more than once"):
rfn.merge_arrays((q1, q2))
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays(self, flatten):
"""Test `numpy.lib.recfunctions.merge_arrays`."""
# Merge single normal array.
arr = rfn.merge_arrays(self.q_pv["p"], flatten=flatten)
assert_array_equal(arr["f0"], self.q_pv["p"])
assert arr.unit == (u.km,)
# Merge single structured array.
arr = rfn.merge_arrays(self.q_pv, flatten=flatten)
assert_array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
# Merge 1-element tuple.
arr = rfn.merge_arrays((self.q_pv,), flatten=flatten)
assert np.array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
@pytest.mark.xfail
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_nonquantities(self, flatten):
# Fails because cannot create quantity from structured array.
        arr = rfn.merge_arrays((self.q_pv["p"], self.q_pv.value), flatten=flatten)
def test_merge_array_nested_structure(self):
# Merge 2-element tuples without flattening.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t))
assert_array_equal(arr["f0"], self.q_pv)
assert_array_equal(arr["f1"], self.q_pv_t)
assert arr.unit == ((u.km, u.km / u.s), ((u.km, u.km / u.s), u.s))
def test_merge_arrays_flatten_nested_structure(self):
# Merge 2-element tuple, flattening it.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t), flatten=True)
assert_array_equal(arr["p"], self.q_pv["p"])
assert_array_equal(arr["v"], self.q_pv["v"])
assert_array_equal(arr["pp"], self.q_pv_t["pv"]["pp"])
assert_array_equal(arr["vv"], self.q_pv_t["pv"]["vv"])
assert_array_equal(arr["t"], self.q_pv_t["t"])
assert arr.unit == (u.km, u.km / u.s, u.km, u.km / u.s, u.s)
def test_merge_arrays_asrecarray(self):
with pytest.raises(ValueError, match="asrecarray=True is not supported."):
rfn.merge_arrays(self.q_pv, asrecarray=True)
def test_merge_arrays_usemask(self):
with pytest.raises(ValueError, match="usemask=True is not supported."):
rfn.merge_arrays(self.q_pv, usemask=True)
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_str(self, flatten):
with pytest.raises(
TypeError, match="the Quantity implementation cannot handle"
):
rfn.merge_arrays((self.q_pv, np.array(["a", "b", "c"])), flatten=flatten)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
rec_functions = {
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.stack_arrays, rfn.find_duplicates,
rfn.recursive_fill_fields, rfn.require_fields,
} # fmt: skip
untested_functions |= rec_functions
@needs_array_function
def test_testing_completeness():
assert not CoverageMeta.covered.intersection(untested_functions)
assert all_wrapped == (CoverageMeta.covered | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(FUNCTION_HELPERS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
@needs_array_function
def test_all_included(self):
included_in_helpers = (
SUBCLASS_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(FUNCTION_HELPERS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
    # untested_functions is created using all_wrapped_functions
@needs_array_function
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS | TBD_FUNCTIONS == untested_functions
|
156d5f6f2066064261759f812efec6eb9249721a64178f11da94418bfed55c67 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numbers
import numpy as np
from astropy.units import (
CompositeUnit,
Unit,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
photometric,
)
from .core import FunctionQuantity, FunctionUnitBase
from .units import dB, dex, mag
__all__ = [
"LogUnit",
"MagUnit",
"DexUnit",
"DecibelUnit",
"LogQuantity",
"Magnitude",
"Decibel",
"Dex",
"STmag",
"ABmag",
"M_bol",
"m_bol",
]
class LogUnit(FunctionUnitBase):
"""Logarithmic unit containing a physical one
Usually, logarithmic units are instantiated via specific subclasses
such `MagUnit`, `DecibelUnit`, and `DexUnit`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the logarithmic function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the logarithmic unit set by the subclass.
"""
# the four essential overrides of FunctionUnitBase
@property
def _default_function_unit(self):
return dex
@property
def _quantity_class(self):
return LogQuantity
def from_physical(self, x):
"""Transformation from value in physical to value in logarithmic units.
Used in equivalency."""
return dex.to(self._function_unit, np.log10(x))
def to_physical(self, x):
"""Transformation from value in logarithmic to value in physical units.
Used in equivalency."""
return 10 ** self._function_unit.to(dex, x)
# ^^^^ the four essential overrides of FunctionUnitBase
# add addition and subtraction, which imply multiplication/division of
# the underlying physical units
def _add_and_adjust_physical_unit(self, other, sign_self, sign_other):
"""Add/subtract LogUnit to/from another unit, and adjust physical unit.
self and other are multiplied by sign_self and sign_other, resp.
We wish to do: ±lu_1 + ±lu_2 -> lu_f (lu=logarithmic unit)
and pu_1^(±1) * pu_2^(±1) -> pu_f (pu=physical unit)
Raises
------
UnitsError
If function units are not equivalent.
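        Examples
        --------
        Illustrative sketch (added for clarity; not part of the original
        docstring): adding two magnitude units multiplies their physical
        units.
        >>> from astropy import units as u
        >>> u.mag(u.Jy) + u.mag(u.s**-1)  # doctest: +SKIP
        Unit("mag(Jy / s)")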
"""
# First, insist on compatible logarithmic type. Here, plain u.mag,
# u.dex, and u.dB are OK, i.e., other does not have to be LogUnit
# (this will indirectly test whether other is a unit at all).
try:
getattr(other, "function_unit", other)._to(self._function_unit)
except AttributeError:
# if other is not a unit (i.e., does not have _to).
return NotImplemented
except UnitsError:
raise UnitsError(
"Can only add/subtract logarithmic units of compatible type."
)
other_physical_unit = getattr(other, "physical_unit", dimensionless_unscaled)
physical_unit = CompositeUnit(
1, [self._physical_unit, other_physical_unit], [sign_self, sign_other]
)
return self._copy(physical_unit)
def __neg__(self):
return self._copy(self.physical_unit ** (-1))
def __add__(self, other):
# Only know how to add to a logarithmic unit with compatible type,
# be it a plain one (u.mag, etc.,) or another LogUnit
return self._add_and_adjust_physical_unit(other, +1, +1)
def __radd__(self, other):
return self._add_and_adjust_physical_unit(other, +1, +1)
def __sub__(self, other):
return self._add_and_adjust_physical_unit(other, +1, -1)
def __rsub__(self, other):
        # here, in normal usage other cannot be a LogUnit; the only equivalent
        # ones would be u.mag, u.dB, u.dex. But might as well use the common routine.
return self._add_and_adjust_physical_unit(other, -1, +1)
class MagUnit(LogUnit):
"""Logarithmic physical units expressed in magnitudes
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the magnitude function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``mag``, but this allows one to use an equivalent
unit such as ``2 mag``.
"""
@property
def _default_function_unit(self):
return mag
@property
def _quantity_class(self):
return Magnitude
class DexUnit(LogUnit):
"""Logarithmic physical units expressed in magnitudes
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the dex function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dex``, but this allows one to use an equivalent
unit such as ``0.5 dex``.
"""
@property
def _default_function_unit(self):
return dex
@property
def _quantity_class(self):
return Dex
def to_string(self, format="generic"):
if format == "cds":
if self.physical_unit == dimensionless_unscaled:
return "[-]" # by default, would get "[---]".
else:
return f"[{self.physical_unit.to_string(format=format)}]"
else:
return super().to_string()
class DecibelUnit(LogUnit):
"""Logarithmic physical units expressed in dB
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the decibel function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dB``, but this allows one to use an equivalent
unit such as ``2 dB``.
"""
@property
def _default_function_unit(self):
return dB
@property
def _quantity_class(self):
return Decibel
class LogQuantity(FunctionQuantity):
"""A representation of a (scaled) logarithm of a number with a unit
Parameters
----------
value : number, `~astropy.units.Quantity`, `~astropy.units.function.logarithmic.LogQuantity`, or sequence of quantity-like.
The numerical value of the logarithmic quantity. If a number or
a `~astropy.units.Quantity` with a logarithmic unit, it will be
converted to ``unit`` and the physical unit will be inferred from
``unit``. If a `~astropy.units.Quantity` with just a physical unit,
        it will be converted to the logarithmic unit, after, if necessary,
converting it to the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.function.FunctionUnitBase`, optional
For an `~astropy.units.function.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The ``dtype`` of the resulting Numpy array or scalar that will
        hold the value. If not provided, it is determined automatically
from the input value.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
Examples
--------
Typically, use is made of an `~astropy.units.function.FunctionQuantity`
subclass, as in::
>>> import astropy.units as u
>>> u.Magnitude(-2.5)
<Magnitude -2.5 mag>
>>> u.Magnitude(10.*u.count/u.second)
<Magnitude -2.5 mag(ct / s)>
>>> u.Decibel(1.*u.W, u.DecibelUnit(u.mW)) # doctest: +FLOAT_CMP
<Decibel 30. dB(mW)>
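    As an additional illustrative sketch (not in the original examples),
    multiplying a logarithmic quantity by a plain number scales the value
    and raises the physical unit to that power:
    >>> 2 * u.Magnitude(10.*u.count/u.second)  # doctest: +SKIP
    <Magnitude -5. mag(ct2 / s2)>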
"""
# only override of FunctionQuantity
_unit_class = LogUnit
# additions that work just for logarithmic units
def __add__(self, other):
# Add function units, thus multiplying physical units. If no unit is
# given, assume dimensionless_unscaled; this will give the appropriate
# exception in LogUnit.__add__.
new_unit = self.unit + getattr(other, "unit", dimensionless_unscaled)
# Add actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view + getattr(other, "_function_view", other)
return self._new_view(result, new_unit)
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
new_unit = self.unit + getattr(other, "unit", dimensionless_unscaled)
# Do calculation in-place using _function_view of array.
function_view = self._function_view
function_view += getattr(other, "_function_view", other)
self._set_unit(new_unit)
return self
def __sub__(self, other):
# Subtract function units, thus dividing physical units.
new_unit = self.unit - getattr(other, "unit", dimensionless_unscaled)
# Subtract actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view - getattr(other, "_function_view", other)
return self._new_view(result, new_unit)
def __rsub__(self, other):
new_unit = self.unit.__rsub__(getattr(other, "unit", dimensionless_unscaled))
result = self._function_view.__rsub__(getattr(other, "_function_view", other))
# Ensure the result is in right function unit scale
# (with rsub, this does not have to be one's own).
result = result.to(new_unit.function_unit)
return self._new_view(result, new_unit)
def __isub__(self, other):
new_unit = self.unit - getattr(other, "unit", dimensionless_unscaled)
# Do calculation in-place using _function_view of array.
function_view = self._function_view
function_view -= getattr(other, "_function_view", other)
self._set_unit(new_unit)
return self
def __mul__(self, other):
# Multiply by a float or a dimensionless quantity
if isinstance(other, numbers.Number):
# Multiplying a log means putting the factor into the exponent
# of the unit
new_physical_unit = self.unit.physical_unit**other
result = self.view(np.ndarray) * other
return self._new_view(result, self.unit._copy(new_physical_unit))
else:
return super().__mul__(other)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, numbers.Number):
new_physical_unit = self.unit.physical_unit**other
function_view = self._function_view
function_view *= other
self._set_unit(self.unit._copy(new_physical_unit))
return self
else:
return super().__imul__(other)
def __truediv__(self, other):
# Divide by a float or a dimensionless quantity
if isinstance(other, numbers.Number):
            # Dividing a log by a number means putting the reciprocal of the
            # divisor into the exponent of the unit.
new_physical_unit = self.unit.physical_unit ** (1 / other)
result = self.view(np.ndarray) / other
return self._new_view(result, self.unit._copy(new_physical_unit))
else:
return super().__truediv__(other)
def __itruediv__(self, other):
if isinstance(other, numbers.Number):
new_physical_unit = self.unit.physical_unit ** (1 / other)
function_view = self._function_view
function_view /= other
self._set_unit(self.unit._copy(new_physical_unit))
return self
else:
return super().__itruediv__(other)
def __pow__(self, other):
# We check if this power is OK by applying it first to the unit.
try:
other = float(other)
except TypeError:
return NotImplemented
new_unit = self.unit**other
new_value = self.view(np.ndarray) ** other
return self._new_view(new_value, new_unit)
def __ilshift__(self, other):
try:
other = Unit(other)
except UnitTypeError:
return NotImplemented
if not isinstance(other, self._unit_class):
return NotImplemented
try:
factor = self.unit.physical_unit._to(other.physical_unit)
except UnitConversionError:
# Maybe via equivalencies? Now we do make a temporary copy.
try:
value = self._to_value(other)
except UnitConversionError:
return NotImplemented
self.view(np.ndarray)[...] = value
else:
self.view(np.ndarray)[...] += self.unit.from_physical(factor)
self._set_unit(other)
return self
# Methods that do not work for function units generally but are OK for
# logarithmic units as they imply differences and independence of
# physical unit.
def var(self, axis=None, dtype=None, out=None, ddof=0):
unit = self.unit.function_unit**2
return self._wrap_function(np.var, axis, dtype, out=out, ddof=ddof, unit=unit)
def std(self, axis=None, dtype=None, out=None, ddof=0):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof, unit=unit)
def ptp(self, axis=None, out=None):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.ptp, axis, out=out, unit=unit)
def diff(self, n=1, axis=-1):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.diff, n, axis, unit=unit)
def ediff1d(self, to_end=None, to_begin=None):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.ediff1d, to_end, to_begin, unit=unit)
_supported_functions = FunctionQuantity._supported_functions | {
getattr(np, function) for function in ("var", "std", "ptp", "diff", "ediff1d")
}
class Dex(LogQuantity):
_unit_class = DexUnit
class Decibel(LogQuantity):
_unit_class = DecibelUnit
class Magnitude(LogQuantity):
_unit_class = MagUnit
dex._function_unit_class = DexUnit
dB._function_unit_class = DecibelUnit
mag._function_unit_class = MagUnit
STmag = MagUnit(photometric.STflux)
STmag.__doc__ = "ST magnitude: STmag=-21.1 corresponds to 1 erg/s/cm2/A"
ABmag = MagUnit(photometric.ABflux)
ABmag.__doc__ = "AB magnitude: ABmag=-48.6 corresponds to 1 erg/s/cm2/Hz"
M_bol = MagUnit(photometric.Bol)
M_bol.__doc__ = (
f"Absolute bolometric magnitude: M_bol=0 corresponds to L_bol0={photometric.Bol.si}"
)
m_bol = MagUnit(photometric.bol)
m_bol.__doc__ = (
f"Apparent bolometric magnitude: m_bol=0 corresponds to f_bol0={photometric.bol.si}"
)
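# Illustrative usage sketch (kept as a comment so module import behaviour is
# unchanged; the number shown is approximate): the photometric magnitude units
# defined above convert to fluxes, e.g.
#
#     >>> import astropy.units as u
#     >>> (0 * u.ABmag).to(u.Jy)  # doctest: +SKIP
#     <Quantity 3630.78... Jy>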
|
7407e61683cb22ac667755daa5f694b66887249d2d3e6a48f8edcd0876645934 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Function Units and Quantities."""
from abc import ABCMeta, abstractmethod
import numpy as np
from astropy.units import (
Quantity,
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
__all__ = ["FunctionUnitBase", "FunctionQuantity"]
SUPPORTED_UFUNCS = {
getattr(np.core.umath, ufunc)
for ufunc in (
"isfinite",
"isinf",
"isnan",
"sign",
"signbit",
"rint",
"floor",
"ceil",
"trunc",
"_ones_like",
"ones_like",
"positive",
)
if hasattr(np.core.umath, ufunc)
}
# TODO: the following could work if helper changed relative to Quantity:
# - spacing should return dimensionless, not same unit
# - negative should negate unit too,
# - add, subtract, comparisons can work if units added/subtracted
SUPPORTED_FUNCTIONS = {
getattr(np, function)
for function in ("clip", "trace", "mean", "min", "max", "round")
}
# subclassing UnitBase or CompositeUnit was found to be problematic, requiring
# a large number of overrides. Hence, define new class.
class FunctionUnitBase(metaclass=ABCMeta):
"""Abstract base class for function units.
Function units are functions containing a physical unit, such as dB(mW).
Most of the arithmetic operations on function units are defined in this
base class.
While instantiation is defined, this class should not be used directly.
Rather, subclasses should be used that override the abstract properties
`_default_function_unit` and `_quantity_class`, and the abstract methods
`from_physical`, and `to_physical`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the function unit set by the subclass.
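    Examples
    --------
    A minimal illustrative sketch (not part of the original docstring; it uses
    the concrete `~astropy.units.DecibelUnit` subclass):
    >>> from astropy import units as u
    >>> dbw = u.DecibelUnit(u.W)
    >>> dbw.physical_unit  # doctest: +SKIP
    Unit("W")
    >>> dbw.to(u.W, 30.)  # doctest: +SKIP
    1000.0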
"""
# ↓↓↓ the following four need to be set by subclasses
# Make this a property so we can ensure subclasses define it.
@property
@abstractmethod
def _default_function_unit(self):
"""Default function unit corresponding to the function.
This property should be overridden by subclasses, with, e.g.,
        `~astropy.units.MagUnit` returning `~astropy.units.mag`.
"""
# This has to be a property because the function quantity will not be
# known at unit definition time, as it gets defined after.
@property
@abstractmethod
def _quantity_class(self):
"""Function quantity class corresponding to this function unit.
This property should be overridden by subclasses, with, e.g.,
        `~astropy.units.MagUnit` returning `~astropy.units.Magnitude`.
"""
@abstractmethod
def from_physical(self, x):
"""Transformation from value in physical to value in function units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
@abstractmethod
def to_physical(self, x):
"""Transformation from value in function to value in physical units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
# ↑↑↑ the above four need to be set by subclasses
# have priority over arrays, regular units, and regular quantities
__array_priority__ = 30000
def __init__(self, physical_unit=None, function_unit=None):
if physical_unit is None:
physical_unit = dimensionless_unscaled
else:
physical_unit = Unit(physical_unit)
if not isinstance(physical_unit, UnitBase) or physical_unit.is_equivalent(
self._default_function_unit
):
raise UnitConversionError(f"{physical_unit} is not a physical unit.")
if function_unit is None:
function_unit = self._default_function_unit
else:
# any function unit should be equivalent to subclass default
function_unit = Unit(getattr(function_unit, "function_unit", function_unit))
if not function_unit.is_equivalent(self._default_function_unit):
raise UnitConversionError(
f"Cannot initialize '{self.__class__.__name__}' instance with "
f"function unit '{function_unit}', as it is not equivalent to "
f"default function unit '{self._default_function_unit}'."
)
self._physical_unit = physical_unit
self._function_unit = function_unit
def _copy(self, physical_unit=None):
"""Copy oneself, possibly with a different physical unit."""
if physical_unit is None:
physical_unit = self.physical_unit
return self.__class__(physical_unit, self.function_unit)
@property
def physical_unit(self):
return self._physical_unit
@property
def function_unit(self):
return self._function_unit
@property
def equivalencies(self):
"""List of equivalencies between function and physical units.
Uses the `from_physical` and `to_physical` methods.
"""
return [(self, self.physical_unit, self.to_physical, self.from_physical)]
# ↓↓↓ properties/methods required to behave like a unit
def decompose(self, bases=set()):
"""Copy the current unit with the physical unit decomposed.
For details, see `~astropy.units.UnitBase.decompose`.
"""
return self._copy(self.physical_unit.decompose(bases))
@property
def si(self):
"""Copy the current function unit with the physical unit in SI."""
return self._copy(self.physical_unit.si)
@property
def cgs(self):
"""Copy the current function unit with the physical unit in CGS."""
return self._copy(self.physical_unit.cgs)
def _get_physical_type_id(self):
"""Get physical type corresponding to physical unit."""
return self.physical_unit._get_physical_type_id()
@property
def physical_type(self):
"""Return the physical type of the physical unit (e.g., 'length')."""
return self.physical_unit.physical_type
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, string, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to the built-in equivalencies between the
function unit and the physical one, as well as possible global
defaults set by, e.g., `~astropy.units.set_enabled_equivalencies`.
Use `None` to turn off any global equivalencies.
Returns
-------
bool
"""
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies) for u in other)
other_physical_unit = getattr(
other,
"physical_unit",
(
dimensionless_unscaled
if self.function_unit.is_equivalent(other)
else other
),
)
return self.physical_unit.is_equivalent(other_physical_unit, equivalencies)
def to(self, other, value=1.0, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : `~astropy.units.Unit`, `~astropy.units.function.FunctionUnitBase`, or str
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the specified unit.
If not provided, defaults to 1.0.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
            This list is meant to treat only equivalencies between different
physical units; the built-in equivalency between the function
unit and the physical one is automatically taken into account.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
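        Examples
        --------
        Illustrative sketch (not part of the original docstring):
        >>> from astropy import units as u
        >>> u.DecibelUnit(u.mW).to(u.W, 30.)  # doctest: +SKIP
        1.0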
"""
# conversion to one's own physical unit should be fastest
if other is self.physical_unit:
return self.to_physical(value)
other_function_unit = getattr(other, "function_unit", other)
if self.function_unit.is_equivalent(other_function_unit):
# when other is an equivalent function unit:
# first convert physical units to other's physical units
other_physical_unit = getattr(
other, "physical_unit", dimensionless_unscaled
)
if self.physical_unit != other_physical_unit:
value_other_physical = self.physical_unit.to(
other_physical_unit, self.to_physical(value), equivalencies
)
# make function unit again, in own system
value = self.from_physical(value_other_physical)
# convert possible difference in function unit (e.g., dex->dB)
return self.function_unit.to(other_function_unit, value)
else:
try:
# when other is not a function unit
return self.physical_unit.to(
other, self.to_physical(value), equivalencies
)
except UnitConversionError as e:
if self.function_unit == Unit("mag"):
# One can get to raw magnitudes via math that strips the dimensions off.
# Include extra information in the exception to remind users of this.
msg = "Did you perhaps subtract magnitudes so the unit got lost?"
e.args += (msg,)
raise e
else:
raise
def is_unity(self):
return False
def __eq__(self, other):
return self.physical_unit == getattr(
other, "physical_unit", dimensionless_unscaled
) and self.function_unit == getattr(other, "function_unit", other)
def __ne__(self, other):
return not self.__eq__(other)
def __rlshift__(self, other):
"""Unit conversion operator ``<<``"""
try:
return self._quantity_class(other, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __mul__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit * other
else:
raise UnitsError(
"Cannot multiply a function unit with a physical dimension "
"with any unit."
)
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit / other
else:
raise UnitsError(
"Cannot divide a function unit with a physical dimension "
"by any unit."
)
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(1.0 / other, unit=self)
except Exception:
return NotImplemented
def __rtruediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return other / self.function_unit
else:
raise UnitsError(
"Cannot divide a function unit with a physical dimension "
"into any unit"
)
else:
# Don't know what to do with anything not like a unit.
return NotImplemented
def __pow__(self, power):
if power == 0:
return dimensionless_unscaled
elif power == 1:
return self._copy()
if self.physical_unit == dimensionless_unscaled:
return self.function_unit**power
raise UnitsError(
"Cannot raise a function unit with a physical dimension "
"to any power but 0 or 1."
)
def __pos__(self):
return self._copy()
def to_string(self, format="generic"):
"""
Output the unit in the given format as a string.
The physical unit is appended, within parentheses, to the function
unit, as in "dB(mW)", with both units set using the given format
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
if format not in ("generic", "unscaled", "latex", "latex_inline"):
raise ValueError(
f"Function units cannot be written in {format} "
"format. Only 'generic', 'unscaled', 'latex' and "
"'latex_inline' are supported."
)
self_str = self.function_unit.to_string(format)
pu_str = self.physical_unit.to_string(format)
if pu_str == "":
pu_str = "1"
if format.startswith("latex"):
# need to strip leading and trailing "$"
self_str += rf"$\mathrm{{\left( {pu_str[1:-1]} \right)}}$"
else:
self_str += f"({pu_str})"
return self_str
def __str__(self):
"""Return string representation for unit."""
self_str = str(self.function_unit)
pu_str = str(self.physical_unit)
if pu_str:
self_str += f"({pu_str})"
return self_str
def __repr__(self):
# By default, try to give a representation using `Unit(<string>)`,
# with string such that parsing it would give the correct FunctionUnit.
if callable(self.function_unit):
return f'Unit("{self.to_string()}")'
else:
return '{}("{}"{})'.format(
self.__class__.__name__,
self.physical_unit,
""
if self.function_unit is self._default_function_unit
else f', unit="{self.function_unit}"',
)
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return self.to_string("latex")
def __hash__(self):
return hash((self.function_unit, self.physical_unit))
class FunctionQuantity(Quantity):
"""A representation of a (scaled) function of a number with a unit.
Function quantities are quantities whose units are functions containing a
physical unit, such as dB(mW). Most of the arithmetic operations on
function quantities are defined in this base class.
While instantiation is also defined here, this class should not be
instantiated directly. Rather, subclasses should be made which have
``_unit_class`` pointing back to the corresponding function unit class.
Parameters
----------
value : number, quantity-like, or sequence thereof
The numerical value of the function quantity. If a number or
a `~astropy.units.Quantity` with a function unit, it will be converted
to ``unit`` and the physical unit will be inferred from ``unit``.
If a `~astropy.units.Quantity` with just a physical unit, it will
        be converted to the function unit, after, if necessary, converting it to
the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.function.FunctionUnitBase`, optional
For an `~astropy.units.function.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. Ignored
if the input does not need to be converted and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be of the
class used. Otherwise, subclasses will be passed through.
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`~astropy.units.Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not a `~astropy.units.function.FunctionUnitBase`
or `~astropy.units.Unit` object, or a parseable string unit.
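    Examples
    --------
    A brief illustrative sketch (not part of the original docstring; it uses
    the `~astropy.units.Magnitude` subclass):
    >>> import astropy.units as u
    >>> q = u.Magnitude(10. * u.ct / u.s)
    >>> q.physical  # doctest: +SKIP
    <Quantity 10. ct / s>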
"""
_unit_class = None
"""Default `~astropy.units.function.FunctionUnitBase` subclass.
This should be overridden by subclasses.
"""
# Ensure priority over ndarray, regular Unit & Quantity, and FunctionUnit.
__array_priority__ = 40000
# Define functions that work on FunctionQuantity.
_supported_ufuncs = SUPPORTED_UFUNCS
_supported_functions = SUPPORTED_FUNCTIONS
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# Convert possible string input to a (function) unit.
unit = Unit(unit)
if not isinstance(unit, FunctionUnitBase):
# By default, use value's physical unit.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# if iterable, see if first item has a unit
# (mixed lists fail in super call below).
try:
value_unit = getattr(value[0], "unit")
except Exception:
pass
physical_unit = getattr(value_unit, "physical_unit", value_unit)
unit = cls._unit_class(physical_unit, function_unit=unit)
# initialise!
return super().__new__(
cls,
value,
unit,
dtype=dtype,
copy=copy,
order=order,
subok=subok,
ndmin=ndmin,
)
# ↓↓↓ properties not found in Quantity
@property
def physical(self):
"""The physical quantity corresponding the function one."""
return self.to(self.unit.physical_unit)
@property
def _function_view(self):
"""View as Quantity with function unit, dropping the physical unit.
Use `~astropy.units.quantity.Quantity.value` for just the value.
"""
return self._new_view(unit=self.unit.function_unit)
# ↓↓↓ methods overridden to change the behavior
@property
def si(self):
"""Return a copy with the physical unit in SI units."""
return self.__class__(self.physical.si)
@property
def cgs(self):
"""Return a copy with the physical unit in CGS units."""
return self.__class__(self.physical.cgs)
def decompose(self, bases=[]):
"""Generate a new `FunctionQuantity` with the physical unit decomposed.
For details, see `~astropy.units.Quantity.decompose`.
"""
return self.__class__(self.physical.decompose(bases))
# ↓↓↓ methods overridden to add additional behavior
def __quantity_subclass__(self, unit):
if isinstance(unit, FunctionUnitBase):
return self.__class__, True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if not isinstance(unit, self._unit_class):
# Have to take care of, e.g., (10*u.mag).view(u.Magnitude)
try:
# "or 'nonsense'" ensures `None` breaks, just in case.
unit = self._unit_class(function_unit=unit or "nonsense")
except Exception:
raise UnitTypeError(
f"{type(self).__name__} instances require"
f" {self._unit_class.__name__} function units, so cannot set it to"
f" '{unit}'."
)
self._unit = unit
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# TODO: it would be more logical to have this in Quantity already,
# instead of in UFUNC_HELPERS, where it cannot be overridden.
# And really it should just return NotImplemented, since possibly
# another argument might know what to do.
if function not in self._supported_ufuncs:
raise UnitTypeError(
f"Cannot use ufunc '{function.__name__}' with function quantities"
)
return super().__array_ufunc__(function, method, *inputs, **kwargs)
def _maybe_new_view(self, result):
"""View as function quantity if the unit is unchanged.
Used for the case that self.unit.physical_unit is dimensionless,
        where multiplication and division are done using the Quantity
equivalent, to transform them back to a FunctionQuantity if possible.
"""
if isinstance(result, Quantity) and result.unit == self.unit:
return self._new_view(result)
else:
return result
# ↓↓↓ methods overridden to change behavior
def __mul__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view * other)
raise UnitTypeError(
"Cannot multiply function quantities which are not dimensionless "
"with anything."
)
def __truediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view / other)
raise UnitTypeError(
"Cannot divide function quantities which are not dimensionless by anything."
)
def __rtruediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view.__rtruediv__(other))
raise UnitTypeError(
"Cannot divide function quantities which are not dimensionless "
"into anything."
)
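    # Illustrative sketch of the arithmetic rules above (assumes
    # ``astropy.units`` imported as ``u``): multiplication and division are
    # only allowed for dimensionless function quantities, where they act on
    # the function values::
    #
    #     >>> u.Magnitude(2.0) * 2.0           # doctest: +SKIP
    #     <Magnitude 4. mag>
    #     >>> u.Magnitude(10.0 * u.Jy) * 2.0   # doctest: +SKIP
    #     Traceback (most recent call last):
    #         ...
    #     UnitTypeError: Cannot multiply function quantities which are not
    #     dimensionless with anything.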
def _comparison(self, other, comparison_func):
"""Do a comparison between self and other, raising UnitsError when
other cannot be converted to self because it has different physical
unit, and returning NotImplemented when there are other errors."""
try:
# will raise a UnitsError if physical units not equivalent
other_in_own_unit = self._to_own_unit(other, check_precision=False)
except UnitsError as exc:
if self.unit.physical_unit != dimensionless_unscaled:
raise exc
try:
other_in_own_unit = self._function_view._to_own_unit(
other, check_precision=False
)
except Exception:
raise exc
except Exception:
return NotImplemented
return comparison_func(other_in_own_unit)
def __eq__(self, other):
try:
return self._comparison(other, self.value.__eq__)
except UnitsError:
return False
def __ne__(self, other):
try:
return self._comparison(other, self.value.__ne__)
except UnitsError:
return True
def __gt__(self, other):
return self._comparison(other, self.value.__gt__)
def __ge__(self, other):
return self._comparison(other, self.value.__ge__)
def __lt__(self, other):
return self._comparison(other, self.value.__lt__)
def __le__(self, other):
return self._comparison(other, self.value.__le__)
def __lshift__(self, other):
"""Unit conversion operator `<<`"""
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
# Ensure Quantity methods are used only if they make sense.
def _wrap_function(self, function, *args, **kwargs):
if function in self._supported_functions:
return super()._wrap_function(function, *args, **kwargs)
# For dimensionless, we can convert to regular quantities.
if all(
arg.unit.physical_unit == dimensionless_unscaled
for arg in (self,) + args
if (hasattr(arg, "unit") and hasattr(arg.unit, "physical_unit"))
):
args = tuple(getattr(arg, "_function_view", arg) for arg in args)
return self._function_view._wrap_function(function, *args, **kwargs)
raise TypeError(
f"Cannot use method that uses function '{function.__name__}' with "
"function quantities that are not dimensionless."
)
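    # For example (illustrative sketch): ``mean`` is in ``_supported_functions``
    # and therefore stays a function quantity, whereas other reductions are
    # only allowed when the physical unit is dimensionless::
    #
    #     >>> u.Magnitude([1.0, 2.0, 3.0]).mean()   # doctest: +SKIP
    #     <Magnitude 2. mag>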
# Override functions that are supported but do not use _wrap_function
# in Quantity.
def max(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.max, axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.min, axis, out=out, keepdims=keepdims)
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.sum, axis, dtype, out=out, keepdims=keepdims)
def cumsum(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.cumsum, axis, dtype, out=out)
def clip(self, a_min, a_max, out=None):
return self._wrap_function(
np.clip, self._to_own_unit(a_min), self._to_own_unit(a_max), out=out
)
|
dbdf028420344eed861bece2d6bb9c6c666f9137e87ef2b8949b69c38be57fc6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines units that can also be used as functions of other units.
If called, their arguments are used to initialize the corresponding function
unit (e.g., ``u.mag(u.ct/u.s)``). Note that the prefixed versions cannot be
called, as it would be unclear what, e.g., ``u.mmag(u.ct/u.s)`` would mean.
"""
from astropy.units.core import _add_prefixes
from .mixin import IrreducibleFunctionUnit, RegularFunctionUnit
_ns = globals()
###########################################################################
# Logarithmic units
# These calls are what core.def_unit would do, but we need to use the callable
# unit versions. The actual function unit classes get added in logarithmic.
dex = IrreducibleFunctionUnit(
["dex"], namespace=_ns, doc="Dex: Base 10 logarithmic unit"
)
dB = RegularFunctionUnit(
["dB", "decibel"],
0.1 * dex,
namespace=_ns,
doc="Decibel: ten per base 10 logarithmic unit",
)
mag = RegularFunctionUnit(
["mag"],
-0.4 * dex,
namespace=_ns,
doc="Astronomical magnitude: -2.5 per base 10 logarithmic unit",
)
_add_prefixes(mag, namespace=_ns, prefixes=True)
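# Illustrative use of the callable units defined above (sketch; assumes
# ``astropy.units`` imported as ``u``)::
#
#     >>> u.mag(u.ct / u.s)                     # doctest: +SKIP
#     Unit("mag(ct / s)")
#     >>> (20 * u.mag(u.ct / u.s)).physical     # doctest: +SKIP
#     <Quantity 1.e-08 ct / s>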
###########################################################################
# CLEANUP
del RegularFunctionUnit
del IrreducibleFunctionUnit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from astropy.units.utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
633912434436c57e9dd33f29053f10e405a0d2f477e73887f0d6391a652ca27e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
from collections.abc import MappingView
from types import MappingProxyType
import numpy as np
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.angles import Angle
from astropy.coordinates.attributes import (
CoordinateAttribute,
DifferentialAttribute,
QuantityAttribute,
)
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
base_doc,
frame_transform_graph,
)
from astropy.coordinates.errors import ConvertError
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.transformations import AffineTransform
from astropy.utils.decorators import classproperty, deprecated, format_doc
from astropy.utils.state import ScienceState
from .icrs import ICRS
__all__ = ["Galactocentric"]
# Measured by minimizing the difference between a plane of coordinates along
# l=0, b=[-90,90] and the Galactocentric x-z plane
# This is not used directly, but accessed via `get_roll0`. We define it here to
# prevent having to create new Angle objects every time `get_roll0` is called.
_ROLL0 = Angle(58.5986320306 * u.degree)
class _StateProxy(MappingView):
"""
`~collections.abc.MappingView` with a read-only ``getitem`` through
`~types.MappingProxyType`.
"""
def __init__(self, mapping):
super().__init__(mapping)
self._mappingproxy = MappingProxyType(self._mapping) # read-only
def __getitem__(self, key):
"""Read-only ``getitem``."""
return self._mappingproxy[key]
def __deepcopy__(self, memo):
return copy.deepcopy(self._mapping, memo=memo)
class galactocentric_frame_defaults(ScienceState):
"""This class controls the global setting of default values for the frame
attributes in the `~astropy.coordinates.Galactocentric` frame, which may be
updated in future versions of ``astropy``. Note that when using
`~astropy.coordinates.Galactocentric`, changing values here will not affect
any attributes that are set explicitly by passing values in to the
`~astropy.coordinates.Galactocentric` initializer. Modifying these defaults
will only affect the frame attribute values when using the frame as, e.g.,
``Galactocentric`` or ``Galactocentric()`` with no explicit arguments.
This class controls the parameter settings by specifying a string name,
with the following pre-specified options:
- 'pre-v4.0': The current default value, which sets the default frame
attribute values to their original (pre-astropy-v4.0) values.
- 'v4.0': The attribute values as updated in Astropy version 4.0.
- 'latest': An alias of the most recent parameter set (currently: 'v4.0')
Alternatively, user-defined parameter settings may be registered, with
:meth:`~astropy.coordinates.galactocentric_frame_defaults.register`,
and used identically as pre-specified parameter sets. At minimum,
registrations must have unique names and a dictionary of parameters
with keys "galcen_coord", "galcen_distance", "galcen_v_sun", "z_sun",
"roll". See examples below.
This class also tracks the references for all parameter values in the
attribute ``references``, as well as any further information in the registry.
The pre-specified options can be extended to include similar
state information as user-defined parameter settings -- for example, to add
parameter uncertainties.
The preferred method for getting a parameter set and metadata, by name, is
:meth:`~galactocentric_frame_defaults.get_from_registry` since
it ensures the immutability of the registry.
See :ref:`astropy:astropy-coordinates-galactocentric-defaults` for more
information.
Examples
--------
The default `~astropy.coordinates.Galactocentric` frame parameters can be
modified globally::
>>> from astropy.coordinates import galactocentric_frame_defaults
>>> _ = galactocentric_frame_defaults.set('v4.0') # doctest: +SKIP
>>> Galactocentric() # doctest: +SKIP
<Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg)>
>>> _ = galactocentric_frame_defaults.set('pre-v4.0') # doctest: +SKIP
>>> Galactocentric() # doctest: +SKIP
<Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>
The default parameters can also be updated by using this class as a context
manager::
>>> with galactocentric_frame_defaults.set('pre-v4.0'):
... print(Galactocentric()) # doctest: +FLOAT_CMP
<Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>
Again, changing the default parameter values will not affect frame
attributes that are explicitly specified::
>>> import astropy.units as u
>>> with galactocentric_frame_defaults.set('pre-v4.0'):
... print(Galactocentric(galcen_distance=8.0*u.kpc)) # doctest: +FLOAT_CMP
<Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.0 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>
Additional parameter sets may be registered, for instance to use the
Dehnen & Binney (1998) measurements of the solar motion. We can also
add metadata, such as the 1-sigma errors. In this example we will modify
the required key "parameters", change the recommended key "references" to
match "parameters", and add the extra key "error" (any key can be added)::
>>> state = galactocentric_frame_defaults.get_from_registry("v4.0")
>>> state["parameters"]["galcen_v_sun"] = (10.00, 225.25, 7.17) * (u.km / u.s)
>>> state["references"]["galcen_v_sun"] = "https://ui.adsabs.harvard.edu/full/1998MNRAS.298..387D"
>>> state["error"] = {"galcen_v_sun": (0.36, 0.62, 0.38) * (u.km / u.s)}
>>> galactocentric_frame_defaults.register(name="DB1998", **state)
Just as in the previous examples, the new parameter set can be retrieved with::
>>> state = galactocentric_frame_defaults.get_from_registry("DB1998")
>>> print(state["error"]["galcen_v_sun"]) # doctest: +FLOAT_CMP
[0.36 0.62 0.38] km / s
"""
_latest_value = "v4.0"
_value = None
_references = None
_state = dict() # all other data
# Note: _StateProxy() produces read-only view of enclosed mapping.
_registry = {
"v4.0": {
"parameters": _StateProxy(
{
"galcen_coord": ICRS(
ra=266.4051 * u.degree, dec=-28.936175 * u.degree
),
"galcen_distance": 8.122 * u.kpc,
"galcen_v_sun": r.CartesianDifferential(
[12.9, 245.6, 7.78] * (u.km / u.s)
),
"z_sun": 20.8 * u.pc,
"roll": 0 * u.deg,
}
),
"references": _StateProxy(
{
"galcen_coord": (
"https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R"
),
"galcen_distance": (
"https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G"
),
"galcen_v_sun": [
"https://ui.adsabs.harvard.edu/abs/2018RNAAS...2..210D",
"https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G",
"https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
],
"z_sun": "https://ui.adsabs.harvard.edu/abs/2019MNRAS.482.1417B",
"roll": None,
}
),
},
"pre-v4.0": {
"parameters": _StateProxy(
{
"galcen_coord": ICRS(
ra=266.4051 * u.degree, dec=-28.936175 * u.degree
),
"galcen_distance": 8.3 * u.kpc,
"galcen_v_sun": r.CartesianDifferential(
[11.1, 220 + 12.24, 7.25] * (u.km / u.s)
),
"z_sun": 27.0 * u.pc,
"roll": 0 * u.deg,
}
),
"references": _StateProxy(
{
"galcen_coord": (
"https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R"
),
"galcen_distance": (
"https://ui.adsabs.harvard.edu/#abs/2009ApJ...692.1075G"
),
"galcen_v_sun": [
"https://ui.adsabs.harvard.edu/#abs/2010MNRAS.403.1829S",
"https://ui.adsabs.harvard.edu/#abs/2015ApJS..216...29B",
],
"z_sun": "https://ui.adsabs.harvard.edu/#abs/2001ApJ...553..184C",
"roll": None,
}
),
},
}
@classproperty # read-only
def parameters(cls):
return cls._value
@classproperty # read-only
def references(cls):
return cls._references
@classmethod
def get_from_registry(cls, name: str):
"""
Return Galactocentric solar parameters and metadata given string names
for the parameter sets. This method ensures the returned state is a
mutable copy, so any changes made do not affect the registry state.
Returns
-------
state : dict
Copy of the registry for the string name.
Should contain, at minimum:
- "parameters": dict
Galactocentric solar parameters
- "references" : Dict[str, Union[str, Sequence[str]]]
References for "parameters".
Fields are str or sequence of str.
Raises
------
KeyError
If invalid string input to registry
to retrieve solar parameters for Galactocentric frame.
"""
# Resolve the meaning of 'latest': latest parameter set is from v4.0
# - update this as newer parameter choices are added
if name == "latest":
name = cls._latest_value
# Get the state from the registry.
# Copy to ensure registry is immutable to modifications of "_value".
# Raises KeyError if `name` is invalid string input to registry
# to retrieve solar parameters for Galactocentric frame.
state = copy.deepcopy(cls._registry[name]) # ensure mutable
return state
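    # For example (illustrative), the registry defined above yields::
    #
    #     >>> state = galactocentric_frame_defaults.get_from_registry("v4.0")
    #     >>> state["parameters"]["galcen_distance"]   # doctest: +SKIP
    #     <Quantity 8.122 kpc>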
@deprecated("v4.2", alternative="`get_from_registry`")
@classmethod
def get_solar_params_from_string(cls, arg):
"""
Return Galactocentric solar parameters given string names
for the parameter sets.
Returns
-------
parameters : dict
Copy of Galactocentric solar parameters from registry
Raises
------
KeyError
If invalid string input to registry
to retrieve solar parameters for Galactocentric frame.
"""
return cls.get_from_registry(arg)["parameters"]
@classmethod
def validate(cls, value):
if value is None:
value = cls._latest_value
if isinstance(value, str):
state = cls.get_from_registry(value)
cls._references = state["references"]
cls._state = state
parameters = state["parameters"]
elif isinstance(value, dict):
parameters = value
elif isinstance(value, Galactocentric):
# turn the frame instance into a dict of frame attributes
parameters = dict()
for k in value.frame_attributes:
parameters[k] = getattr(value, k)
cls._references = value.frame_attribute_references.copy()
cls._state = dict(parameters=parameters, references=cls._references)
else:
raise ValueError(
"Invalid input to retrieve solar parameters for Galactocentric frame:"
" input must be a string, dict, or Galactocentric instance"
)
return parameters
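    # For example (illustrative), ``validate`` also accepts a plain dict of
    # frame attributes, so the defaults can be set directly from one::
    #
    #     >>> params = galactocentric_frame_defaults.get_from_registry("v4.0")["parameters"]
    #     >>> _ = galactocentric_frame_defaults.set(params)   # doctest: +SKIP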
@classmethod
def register(cls, name: str, parameters: dict, references=None, **meta: dict):
"""Register a set of parameters.
Parameters
----------
name : str
The registration name for the parameter and metadata set.
parameters : dict
The solar parameters for Galactocentric frame.
references : dict or None, optional
References for contents of `parameters`.
None becomes empty dict.
**meta : dict, optional
Any other properties to register.
"""
# check on contents of `parameters`
must_have = {"galcen_coord", "galcen_distance", "galcen_v_sun", "z_sun", "roll"}
missing = must_have.difference(parameters)
if missing:
raise ValueError(f"Missing parameters: {missing}")
references = references or {} # None -> {}
state = dict(parameters=parameters, references=references)
state.update(meta) # meta never has keys "parameters" or "references"
cls._registry[name] = state
doc_components = """
x : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`x` position component.
y : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`y` position component.
z : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`z` position component.
v_x : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_x` velocity component.
v_y : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_y` velocity component.
v_z : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_z` velocity component.
"""
doc_footer = """
Other parameters
----------------
galcen_coord : `ICRS`, optional, keyword-only
The ICRS coordinates of the Galactic center.
galcen_distance : `~astropy.units.Quantity`, optional, keyword-only
The distance from the sun to the Galactic center.
galcen_v_sun : `~astropy.coordinates.representation.CartesianDifferential`, `~astropy.units.Quantity` ['speed'], optional, keyword-only
The velocity of the sun *in the Galactocentric frame* as Cartesian
velocity components.
z_sun : `~astropy.units.Quantity` ['length'], optional, keyword-only
The distance from the sun to the Galactic midplane.
roll : `~astropy.coordinates.Angle`, optional, keyword-only
The angle to rotate about the final x-axis, relative to the
orientation for Galactic. For example, if this roll angle is 0,
the final x-z plane will align with the Galactic coordinates x-z
plane. Unless you really know what this means, you probably should
not change this!
Examples
--------
To transform to the Galactocentric frame with the default
frame attributes, pass the uninstantiated class name to the
``transform_to()`` method of a `~astropy.coordinates.SkyCoord` object::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> c = coord.SkyCoord(ra=[158.3122, 24.5] * u.degree,
... dec=[-17.3, 81.52] * u.degree,
... distance=[11.5, 24.12] * u.kpc,
... frame='icrs')
>>> c.transform_to(coord.Galactocentric) # doctest: +FLOAT_CMP
<SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc
[( -9.43489286, -9.40062188, 6.51345359),
(-21.11044918, 18.76334013, 7.83175149)]>
To specify a custom set of parameters, you have to include extra keyword
arguments when initializing the Galactocentric frame object::
>>> c.transform_to(coord.Galactocentric(galcen_distance=8.1*u.kpc)) # doctest: +FLOAT_CMP
<SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.1 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc
[( -9.41284763, -9.40062188, 6.51346272),
(-21.08839478, 18.76334013, 7.83184184)]>
Similarly, transforming from the Galactocentric frame to another coordinate frame::
>>> c = coord.SkyCoord(x=[-8.3, 4.5] * u.kpc,
... y=[0., 81.52] * u.kpc,
... z=[0.027, 24.12] * u.kpc,
... frame=coord.Galactocentric)
>>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)
[( 88.22423301, 29.88672864, 0.17813456),
(289.72864549, 49.9865043 , 85.93949064)]>
Or, with custom specification of the Galactic center::
>>> c = coord.SkyCoord(x=[-8.0, 4.5] * u.kpc,
... y=[0., 81.52] * u.kpc,
... z=[21.0, 24120.0] * u.pc,
... frame=coord.Galactocentric,
... z_sun=21 * u.pc, galcen_distance=8. * u.kpc)
>>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)
[( 86.2585249 , 28.85773187, 2.75625475e-05),
(289.77285255, 50.06290457, 8.59216010e+01)]>
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class Galactocentric(BaseCoordinateFrame):
r"""
A coordinate or frame in the Galactocentric system.
This frame allows specifying the Sun-Galactic center distance, the height of
the Sun above the Galactic midplane, and the solar motion relative to the
Galactic center. However, as there is no modern standard definition of a
Galactocentric reference frame, it is important to pay attention to the
default values used in this class if precision is important in your code.
The default values of the parameters of this frame are taken from the
original definition of the frame in 2014. As such, the defaults are somewhat
out of date relative to recent measurements made possible by, e.g., Gaia.
The defaults can, however, be changed at runtime by setting the parameter
set name in `~astropy.coordinates.galactocentric_frame_defaults`.
The current default parameter set is ``"pre-v4.0"``, indicating that the
parameters were adopted before ``astropy`` version 4.0. A regularly-updated
parameter set can instead be used by setting
``galactocentric_frame_defaults.set('latest')``, and other parameter set
names may be added in future versions. To find out the scientific papers
that the current default parameters are derived from, use
``galcen.frame_attribute_references`` (where ``galcen`` is an instance of
this frame), which will update even if the default parameter set is changed.
The position of the Sun is assumed to be on the x axis of the final,
right-handed system. That is, the x axis points from the position of
the Sun projected to the Galactic midplane to the Galactic center --
roughly towards :math:`(l,b) = (0^\circ,0^\circ)`. For the default
transformation (:math:`{\rm roll}=0^\circ`), the y axis points roughly
towards Galactic longitude :math:`l=90^\circ`, and the z axis points
roughly towards the North Galactic Pole (:math:`b=90^\circ`).
For a more detailed look at the math behind this transformation, see
the document :ref:`astropy:coordinates-galactocentric`.
The frame attributes are listed under **Other Parameters**.
"""
default_representation = r.CartesianRepresentation
default_differential = r.CartesianDifferential
# frame attributes
galcen_coord = CoordinateAttribute(frame=ICRS)
galcen_distance = QuantityAttribute(unit=u.kpc)
galcen_v_sun = DifferentialAttribute(allowed_classes=[r.CartesianDifferential])
z_sun = QuantityAttribute(unit=u.pc)
roll = QuantityAttribute(unit=u.deg)
def __init__(self, *args, **kwargs):
# Set default frame attribute values based on the ScienceState instance
# for the solar parameters defined above
default_params = galactocentric_frame_defaults.get()
self.frame_attribute_references = (
galactocentric_frame_defaults.references.copy()
)
for k in default_params:
if k in kwargs:
# If a frame attribute is set by the user, remove its reference
self.frame_attribute_references.pop(k, None)
# Keep the frame attribute if it is set by the user, otherwise use
# the default value
kwargs[k] = kwargs.get(k, default_params[k])
super().__init__(*args, **kwargs)
@classmethod
def get_roll0(cls):
"""
The additional roll angle (about the final x axis) necessary to align
the final z axis to match the Galactic yz-plane. Setting the ``roll``
frame attribute to the negative of this method's return value removes this
rotation, allowing the use of the `Galactocentric` frame in more general
contexts.
"""
# note that the actual value is defined at the module level. We make it
# available here because this module isn't actually part of the public
# API, so it's better for it to be accessible from Galactocentric
return _ROLL0
# ICRS to/from Galactocentric ----------------------->
def get_matrix_vectors(galactocentric_frame, inverse=False):
"""
Compute the transformation matrix and offset to go from ICRS to the given
Galactocentric frame. Use the ``inverse`` argument to get the matrix and
offsets for the Galactocentric-to-ICRS direction instead.
"""
# shorthand
gcf = galactocentric_frame
# rotation matrix to align x(ICRS) with the vector to the Galactic center
mat1 = rotation_matrix(-gcf.galcen_coord.dec, "y")
mat2 = rotation_matrix(gcf.galcen_coord.ra, "z")
# extra roll away from the Galactic x-z plane
mat0 = rotation_matrix(gcf.get_roll0() - gcf.roll, "x")
# construct transformation matrix and use it
R = mat0 @ mat1 @ mat2
# Now need to translate along x' by the Sun-Galactic center distance and
# rotate about y' to account for the tilt due to the Sun's height above the plane
translation = r.CartesianRepresentation(gcf.galcen_distance * [1.0, 0.0, 0.0])
z_d = gcf.z_sun / gcf.galcen_distance
H = rotation_matrix(-np.arcsin(z_d), "y")
# compute total matrices
A = H @ R
# Now we re-align the translation vector to account for the Sun's height
# above the midplane
offset = -translation.transform(H)
if inverse:
# the inverse of a rotation matrix is a transpose, which is much faster
# and more stable to compute
A = matrix_transpose(A)
offset = (-offset).transform(A)
offset_v = r.CartesianDifferential.from_cartesian(
(-gcf.galcen_v_sun).to_cartesian().transform(A)
)
offset = offset.with_differentials(offset_v)
else:
offset = offset.with_differentials(gcf.galcen_v_sun)
return A, offset
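# Illustrative sketch (not executed here): the matrix/offset pair returned by
# ``get_matrix_vectors`` is what the ``AffineTransform``s below apply, i.e. for
# a barycentric Cartesian position ``x_icrs`` and a frame instance ``gcf``::
#
#     A, offset = get_matrix_vectors(gcf)
#     x_galcen = x_icrs.transform(A) + offset.without_differentials()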
def _check_coord_repr_diff_types(c):
if isinstance(c.data, r.UnitSphericalRepresentation):
raise ConvertError(
"Transforming to/from a Galactocentric frame requires a 3D coordinate, e.g."
" (angle, angle, distance) or (x, y, z)."
)
if "s" in c.data.differentials and isinstance(
c.data.differentials["s"],
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential,
),
):
raise ConvertError(
"Transforming to/from a Galactocentric frame requires a 3D velocity, e.g.,"
" proper motion components and radial velocity."
)
@frame_transform_graph.transform(AffineTransform, ICRS, Galactocentric)
def icrs_to_galactocentric(icrs_coord, galactocentric_frame):
_check_coord_repr_diff_types(icrs_coord)
return get_matrix_vectors(galactocentric_frame)
@frame_transform_graph.transform(AffineTransform, Galactocentric, ICRS)
def galactocentric_to_icrs(galactocentric_coord, icrs_frame):
_check_coord_repr_diff_types(galactocentric_coord)
return get_matrix_vectors(galactocentric_coord, inverse=True)
# Create loopback transformation
frame_transform_graph._add_merged_transform(Galactocentric, ICRS, Galactocentric)
|
1f19fb31c20ee22a4f5ac94de004c4401c4d921b58703fdd85415d4d8cb65d24 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains the coordinate frames implemented by astropy.
Users shouldn't use this module directly, but rather import from the
`astropy.coordinates` module. While it is likely to exist for the long-term,
the existence of this package and details of its organization should be
considered an implementation detail, and is not guaranteed to hold for future
versions of astropy.
Notes
-----
The builtin frame classes are all imported automatically into this package's
namespace, so there's no need to access the sub-modules directly.
To implement a new frame in Astropy, a developer should add the frame as a new
module in this package. Any "self" transformations (i.e., those that transform
from one frame to another frame of the same class) should be included in that
module. Transformation functions connecting the new frame to other frames
should be in a separate module, which should be imported in this package's
``__init__.py`` to ensure the transformations are hooked up when this package is
imported. Placing the transformation functions in separate modules avoids
circular dependencies, because they need references to the frame classes.
"""
from astropy.coordinates.baseframe import frame_transform_graph
from .altaz import AltAz
from .baseradec import BaseRADecFrame
from .cirs import CIRS
from .ecliptic import (
BarycentricMeanEcliptic,
BarycentricTrueEcliptic,
BaseEclipticFrame,
CustomBarycentricEcliptic,
GeocentricMeanEcliptic,
GeocentricTrueEcliptic,
HeliocentricEclipticIAU76,
HeliocentricMeanEcliptic,
HeliocentricTrueEcliptic,
)
from .equatorial import TEME, TETE
from .fk4 import FK4, FK4NoETerms
from .fk5 import FK5
from .galactic import Galactic
from .galactocentric import Galactocentric, galactocentric_frame_defaults
from .gcrs import GCRS, PrecessedGeocentric
from .hadec import HADec
from .hcrs import HCRS
from .icrs import ICRS
from .itrs import ITRS
from .skyoffset import SkyOffsetFrame
from .supergalactic import Supergalactic
# isort: split
# need to import transformations so that they get registered in the graph
from . import (
cirs_observed_transforms,
fk4_fk5_transforms,
galactic_transforms,
icrs_cirs_transforms,
icrs_fk5_transforms,
icrs_observed_transforms,
intermediate_rotation_transforms,
itrs_observed_transforms,
supergalactic_transforms,
)
# isort: split
from . import ecliptic_transforms
# isort: split
# Import this after importing other frames, since this requires various
# transformations to set up the LSR frames
from .lsr import LSR, LSRD, LSRK, GalacticLSR
# we define an __all__ because otherwise the transformation modules
# get included
__all__ = [
"ICRS",
"FK5",
"FK4",
"FK4NoETerms",
"Galactic",
"Galactocentric",
"galactocentric_frame_defaults",
"Supergalactic",
"AltAz",
"HADec",
"GCRS",
"CIRS",
"ITRS",
"HCRS",
"TEME",
"TETE",
"PrecessedGeocentric",
"GeocentricMeanEcliptic",
"BarycentricMeanEcliptic",
"HeliocentricMeanEcliptic",
"GeocentricTrueEcliptic",
"BarycentricTrueEcliptic",
"HeliocentricTrueEcliptic",
"SkyOffsetFrame",
"GalacticLSR",
"LSR",
"LSRK",
"LSRD",
"BaseEclipticFrame",
"BaseRADecFrame",
"make_transform_graph_docs",
"HeliocentricEclipticIAU76",
"CustomBarycentricEcliptic",
]
def make_transform_graph_docs(transform_graph):
"""
Generates a string that can be used in other docstrings to include a
transformation graph, showing the available transforms and
coordinate systems.
Parameters
----------
transform_graph : `~.coordinates.TransformGraph`
Returns
-------
docstring : str
A string that can be added to the end of a docstring to show the
transform graph.
"""
from textwrap import dedent
coosys = [transform_graph.lookup_name(item) for item in transform_graph.get_names()]
# currently, all of the priorities are set to 1, so we don't need to show
# them in the transform graph.
graphstr = transform_graph.to_dot_graph(addnodes=coosys, priorities=False)
docstr = """
The diagram below shows all of the built in coordinate systems,
their aliases (useful for converting other coordinates to them using
attribute-style access) and the pre-defined transformations between
them. The user is free to override any of these transformations by
defining new transformations between these systems, but the
pre-defined transformations should be sufficient for typical usage.
The color of an edge in the graph (i.e. the transformations between two
frames) is set by the type of transformation; the legend box defines the
mapping from transform class name to color.
.. Wrap the graph in a div with a custom class to allow theming.
.. container:: frametransformgraph
.. graphviz::
"""
docstr = dedent(docstr) + " " + graphstr.replace("\n", "\n ")
# colors are in dictionary at the bottom of transformations.py
from astropy.coordinates.transformations import trans_to_color
html_list_items = []
for cls, color in trans_to_color.items():
block = f"""
<li style='list-style: none;'>
<p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;">
<b>{cls.__name__}:</b>
<span style="font-size: 24px; color: {color};"><b>➝</b></span>
</p>
</li>
""" # noqa: E501
html_list_items.append(block)
nl = "\n"
graph_legend = f"""
.. raw:: html
<ul>
{nl.join(html_list_items)}
</ul>
"""
docstr = docstr + dedent(graph_legend)
return docstr
_transform_graph_docs = make_transform_graph_docs(frame_transform_graph)
# Here, we override the module docstring so that sphinx renders the transform
# graph without the developer documentation in the main docstring above.
__doc__ = _transform_graph_docs
|
49974e4f90aa06ca6016746001c6778b270831537f71d572439440e885e2bef0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import DifferentialAttribute
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
RepresentationMapping,
base_doc,
frame_transform_graph,
)
from astropy.coordinates.transformations import AffineTransform
from astropy.time import Time
from astropy.utils.decorators import format_doc
from .baseradec import BaseRADecFrame
from .baseradec import doc_components as doc_components_radec
from .galactic import Galactic
from .icrs import ICRS
# For speed
J2000 = Time("J2000")
v_bary_Schoenrich2010 = r.CartesianDifferential([11.1, 12.24, 7.25] * u.km / u.s)
__all__ = ["LSR", "GalacticLSR", "LSRK", "LSRD"]
doc_footer_lsr = """
Other parameters
----------------
v_bary : `~astropy.coordinates.representation.CartesianDifferential`
The velocity of the solar system barycenter with respect to the LSR, in
Galactic cartesian velocity components.
"""
@format_doc(base_doc, components=doc_components_radec, footer=doc_footer_lsr)
class LSR(BaseRADecFrame):
r"""A coordinate or frame in the Local Standard of Rest (LSR).
This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
a velocity offset relative to the solar system barycenter to remove the
peculiar motion of the sun relative to the LSR. Roughly, the LSR is the mean
velocity of the stars in the solar neighborhood; the precise definition
depends on the study. As defined in Schönrich et al. (2010):
"The LSR is the rest frame at the location of the Sun of a star that would
be on a circular orbit in the gravitational potential one would obtain by
azimuthally averaging away non-axisymmetric features in the actual Galactic
potential." No such orbit truly exists, but it is still a commonly used
velocity frame.
We use default values from Schönrich et al. (2010) for the barycentric
velocity relative to the LSR, which is defined in Galactic (right-handed)
cartesian velocity components
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
values are customizable via the ``v_bary`` argument which specifies the
velocity of the solar system barycenter with respect to the LSR.
The frame attributes are listed under **Other Parameters**.
"""
# frame attributes:
v_bary = DifferentialAttribute(
default=v_bary_Schoenrich2010, allowed_classes=[r.CartesianDifferential]
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSR)
def icrs_to_lsr(icrs_coord, lsr_frame):
v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_coord)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, LSR, ICRS)
def lsr_to_icrs(lsr_coord, icrs_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_frame)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=-v_offset)
return None, offset
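# Illustrative sketch (not executed here): for a source at rest with respect to
# the solar system barycenter, transforming to ``LSR`` adds the solar peculiar
# motion, so the resulting radial velocity is the projection of ``v_bary`` onto
# the line of sight::
#
#     icrs = ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=1 * u.pc,
#                 pm_ra_cosdec=0 * u.mas / u.yr, pm_dec=0 * u.mas / u.yr,
#                 radial_velocity=0 * u.km / u.s)
#     icrs.transform_to(LSR()).radial_velocity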
# ------------------------------------------------------------------------------
doc_components_gal = """
l : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic latitude for this object (``l`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
(``representation`` must be None).
pm_l_cosb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic longitude (including the ``cos(b)`` term)
for this object (``pm_b`` must also be given).
pm_b : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic latitude for this object (``pm_l_cosb``
must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(base_doc, components=doc_components_gal, footer=doc_footer_lsr)
class GalacticLSR(BaseCoordinateFrame):
r"""A coordinate or frame in the Local Standard of Rest (LSR), axis-aligned
to the `Galactic` frame.
This coordinate frame is axis-aligned and co-spatial with `Galactic`, but has
a velocity offset relative to the solar system barycenter to remove the
peculiar motion of the sun relative to the LSR. Roughly, the LSR is the mean
velocity of the stars in the solar neighborhood; the precise definition
depends on the study. As defined in Schönrich et al. (2010):
"The LSR is the rest frame at the location of the Sun of a star that would
be on a circular orbit in the gravitational potential one would obtain by
azimuthally averaging away non-axisymmetric features in the actual Galactic
potential." No such orbit truly exists, but it is still a commonly used
velocity frame.
We use default values from Schönrich et al. (2010) for the barycentric
velocity relative to the LSR, which is defined in Galactic (right-handed)
cartesian velocity components
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
values are customizable via the ``v_bary`` argument which specifies the
velocity of the solar system barycenter with respect to the LSR.
The frame attributes are listed under **Other Parameters**.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "l"),
RepresentationMapping("lat", "b"),
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
# frame attributes:
v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010)
@frame_transform_graph.transform(AffineTransform, Galactic, GalacticLSR)
def galactic_to_galacticlsr(galactic_coord, lsr_frame):
v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, GalacticLSR, Galactic)
def galacticlsr_to_galactic(lsr_coord, galactic_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=-v_offset)
return None, offset
# ------------------------------------------------------------------------------
# The LSRK velocity frame, defined as having a velocity of 20 km/s towards
# RA=270 Dec=30 (B1900) relative to the solar system Barycenter. This is defined
# in:
#
# Gordon 1975, Methods of Experimental Physics: Volume 12:
# Astrophysics, Part C: Radio Observations - Section 6.1.5.
class LSRK(BaseRADecFrame):
r"""
A coordinate or frame in the Kinematic Local Standard of Rest (LSR).
This frame is defined as having a velocity of 20 km/s towards RA=270 Dec=30
(B1900) relative to the solar system Barycenter. This is defined in:
Gordon 1975, Methods of Experimental Physics: Volume 12:
Astrophysics, Part C: Radio Observations - Section 6.1.5.
This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
a velocity offset relative to the solar system barycenter to remove the
peculiar motion of the sun relative to the LSRK.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# GORDON1975_V_BARY = 20*u.km/u.s
# GORDON1975_DIRECTION = FK4(ra=270*u.deg, dec=30*u.deg, equinox='B1900')
# V_OFFSET_LSRK = ((GORDON1975_V_BARY * GORDON1975_DIRECTION.transform_to(ICRS()).data)
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRK = r.CartesianDifferential(
[0.28999706839034606, -17.317264789717928, 10.00141199546947] * u.km / u.s
)
ICRS_LSRK_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=V_OFFSET_LSRK
)
LSRK_ICRS_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=-V_OFFSET_LSRK
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRK)
def icrs_to_lsrk(icrs_coord, lsr_frame):
return None, ICRS_LSRK_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRK, ICRS)
def lsrk_to_icrs(lsr_coord, icrs_frame):
return None, LSRK_ICRS_OFFSET
# ------------------------------------------------------------------------------
# The LSRD velocity frame, defined as a velocity of U=9 km/s, V=12 km/s,
# and W=7 km/s in Galactic coordinates or 16.552945 km/s
# towards l=53.13 b=25.02. This is defined in:
#
# Delhaye 1965, Solar Motion and Velocity Distribution of
# Common Stars.
class LSRD(BaseRADecFrame):
r"""
A coordinate or frame in the Dynamical Local Standard of Rest (LSRD)
This frame is defined as a velocity of U=9 km/s, V=12 km/s,
and W=7 km/s in Galactic coordinates or 16.552945 km/s
towards l=53.13 b=25.02. This is defined in:
Delhaye 1965, Solar Motion and Velocity Distribution of
Common Stars.
This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
a velocity offset relative to the solar system barycenter to remove the
peculiar motion of the sun relative to the LSRD.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# V_BARY_DELHAYE1965 = r.CartesianDifferential([9, 12, 7] * u.km/u.s)
# V_OFFSET_LSRD = (Galactic(V_BARY_DELHAYE1965.to_cartesian()).transform_to(ICRS()).data
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRD = r.CartesianDifferential(
[-0.6382306360182073, -14.585424483191094, 7.8011572411006815] * u.km / u.s
)
ICRS_LSRD_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=V_OFFSET_LSRD
)
LSRD_ICRS_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=-V_OFFSET_LSRD
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRD)
def icrs_to_lsrd(icrs_coord, lsr_frame):
return None, ICRS_LSRD_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRD, ICRS)
def lsrd_to_icrs(lsr_coord, icrs_frame):
return None, LSRD_ICRS_OFFSET
# ------------------------------------------------------------------------------
# Create loopback transformations
frame_transform_graph._add_merged_transform(LSR, ICRS, LSR)
frame_transform_graph._add_merged_transform(GalacticLSR, Galactic, GalacticLSR)
|
ad4d3c7eb89b67a636701078afe4a42440f9ef4d2f431da20126ce5c08434545 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the SkyCoord class. Note that there are also SkyCoord tests in
test_api_ape5.py
"""
import copy
from copy import deepcopy
import numpy as np
import numpy.testing as npt
import pytest
from erfa import ErfaWarning
from astropy import units as u
from astropy.coordinates import (
FK4,
FK5,
GCRS,
ICRS,
AltAz,
Angle,
Attribute,
BaseCoordinateFrame,
CartesianRepresentation,
EarthLocation,
Galactic,
Latitude,
RepresentationMapping,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
frame_transform_graph,
)
from astropy.coordinates.representation import (
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
)
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.coordinates.transformations import FunctionTransform
from astropy.io import fits
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils import isiterable
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.wcs import WCS
RA = 1.0 * u.deg
DEC = 2.0 * u.deg
C_ICRS = ICRS(RA, DEC)
C_FK5 = C_ICRS.transform_to(FK5())
J2001 = Time("J2001")
def allclose(a, b, rtol=0.0, atol=None):
if atol is None:
atol = 1.0e-8 * getattr(a, "unit", 1.0)
return quantity_allclose(a, b, rtol, atol)
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
def test_is_transformable_to_str_input():
"""Test method ``is_transformable_to`` with string input.
The only difference from the frame method of the same name is that
strings are allowed. As the frame tests cover ``is_transformable_to``, here
we only test the added string option.
"""
# make example SkyCoord
c = SkyCoord(90 * u.deg, -11 * u.deg)
# iterate through some frames, checking consistency
names = frame_transform_graph.get_names()
for name in names:
frame = frame_transform_graph.lookup_name(name)()
assert c.is_transformable_to(name) == c.is_transformable_to(frame)
def test_transform_to():
for frame in (
FK5(),
FK5(equinox=Time("J1975.0")),
FK4(),
FK4(equinox=Time("J1975.0")),
SkyCoord(RA, DEC, frame="fk4", equinox="J1980"),
):
c_frame = C_ICRS.transform_to(frame)
s_icrs = SkyCoord(RA, DEC, frame="icrs")
s_frame = s_icrs.transform_to(frame)
assert allclose(c_frame.ra, s_frame.ra)
assert allclose(c_frame.dec, s_frame.dec)
assert allclose(c_frame.distance, s_frame.distance)
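def test_transform_roundtrip_galactic_example():
    # Illustrative sketch (not part of the original astropy test suite): a
    # minimal ICRS -> Galactic -> ICRS round trip through the public SkyCoord
    # API should recover the input coordinates to well within the default
    # ``allclose`` tolerance.
    sc = SkyCoord(10.68458 * u.deg, 41.26917 * u.deg, frame="icrs")
    sc_rt = sc.transform_to(Galactic()).transform_to(ICRS())
    assert allclose(sc_rt.ra, sc.ra)
    assert allclose(sc_rt.dec, sc.dec)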
# set up for parametrized test
rt_sets = []
rt_frames = [ICRS, FK4, FK5, Galactic]
for rt_frame0 in rt_frames:
for rt_frame1 in rt_frames:
for equinox0 in (None, "J1975.0"):
for obstime0 in (None, "J1980.0"):
for equinox1 in (None, "J1975.0"):
for obstime1 in (None, "J1980.0"):
rt_sets.append(
(
rt_frame0,
rt_frame1,
equinox0,
equinox1,
obstime0,
obstime1,
)
)
rt_args = ("frame0", "frame1", "equinox0", "equinox1", "obstime0", "obstime1")
@pytest.mark.parametrize(rt_args, rt_sets)
def test_round_tripping(frame0, frame1, equinox0, equinox1, obstime0, obstime1):
"""
Test round tripping out and back using transform_to in every combination.
"""
attrs0 = {"equinox": equinox0, "obstime": obstime0}
attrs1 = {"equinox": equinox1, "obstime": obstime1}
# Remove None values
attrs0 = {k: v for k, v in attrs0.items() if v is not None}
attrs1 = {k: v for k, v in attrs1.items() if v is not None}
# Go out and back
sc = SkyCoord(RA, DEC, frame=frame0, **attrs0)
# Keep only frame attributes for frame1
attrs1 = {
attr: val for attr, val in attrs1.items() if attr in frame1.frame_attributes
}
sc2 = sc.transform_to(frame1(**attrs1))
# When coming back only keep frame0 attributes for transform_to
attrs0 = {
attr: val for attr, val in attrs0.items() if attr in frame0.frame_attributes
}
# also, if any are None, fill in with defaults
for attrnm in frame0.frame_attributes:
if attrs0.get(attrnm, None) is None:
if attrnm == "obstime" and frame0.get_frame_attr_defaults()[attrnm] is None:
if "equinox" in attrs0:
attrs0[attrnm] = attrs0["equinox"]
else:
attrs0[attrnm] = frame0.get_frame_attr_defaults()[attrnm]
sc_rt = sc2.transform_to(frame0(**attrs0))
if frame0 is Galactic:
assert allclose(sc.l, sc_rt.l)
assert allclose(sc.b, sc_rt.b)
else:
assert allclose(sc.ra, sc_rt.ra)
assert allclose(sc.dec, sc_rt.dec)
if equinox0:
assert type(sc.equinox) is Time and sc.equinox == sc_rt.equinox
if obstime0:
assert type(sc.obstime) is Time and sc.obstime == sc_rt.obstime
def test_coord_init_string():
"""
String input coordinates in various formats.
"""
sc = SkyCoord("1d 2d")
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord("1d", "2d")
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord("1°2′3″", "2°3′4″")
assert allclose(sc.ra, Angle("1°2′3″"))
assert allclose(sc.dec, Angle("2°3′4″"))
sc = SkyCoord("1°2′3″ 2°3′4″")
assert allclose(sc.ra, Angle("1°2′3″"))
assert allclose(sc.dec, Angle("2°3′4″"))
with pytest.raises(ValueError) as err:
SkyCoord("1d 2d 3d")
assert "Cannot parse first argument data" in str(err.value)
sc1 = SkyCoord("8 00 00 +5 00 00.0", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc1, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc11 = SkyCoord("8h00m00s+5d00m00.0s", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc11, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc2 = SkyCoord("8 00 -5 00 00.0", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc2, SkyCoord)
assert allclose(sc2.ra, Angle(120 * u.deg))
assert allclose(sc2.dec, Angle(-5 * u.deg))
sc3 = SkyCoord("8 00 -5 00.6", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc3, SkyCoord)
assert allclose(sc3.ra, Angle(120 * u.deg))
assert allclose(sc3.dec, Angle(-5.01 * u.deg))
sc4 = SkyCoord("J080000.00-050036.00", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc4, SkyCoord)
assert allclose(sc4.ra, Angle(120 * u.deg))
assert allclose(sc4.dec, Angle(-5.01 * u.deg))
sc41 = SkyCoord("J080000+050036", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc41, SkyCoord)
assert allclose(sc41.ra, Angle(120 * u.deg))
assert allclose(sc41.dec, Angle(+5.01 * u.deg))
sc5 = SkyCoord("8h00.6m -5d00.6m", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc5, SkyCoord)
assert allclose(sc5.ra, Angle(120.15 * u.deg))
assert allclose(sc5.dec, Angle(-5.01 * u.deg))
sc6 = SkyCoord("8h00.6m -5d00.6m", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc6, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord("8h00.6m-5d00.6m", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc61, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord("8h00.6-5d00.6", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc61, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc7 = SkyCoord("J1874221.60+122421.6", unit=u.deg)
assert isinstance(sc7, SkyCoord)
assert allclose(sc7.ra, Angle(187.706 * u.deg))
assert allclose(sc7.dec, Angle(12.406 * u.deg))
with pytest.raises(ValueError):
SkyCoord("8 00 -5 00.6", unit=(u.deg, u.deg), frame="galactic")
def test_coord_init_unit():
"""
Test variations of the unit keyword.
"""
for unit in (
"deg",
"deg,deg",
" deg , deg ",
u.deg,
(u.deg, u.deg),
np.array(["deg", "deg"]),
):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(1 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in (
"hourangle",
"hourangle,hourangle",
" hourangle , hourangle ",
u.hourangle,
[u.hourangle, u.hourangle],
):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(30 * u.deg))
for unit in ("hourangle,deg", (u.hourangle, u.deg)):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ("deg,deg,deg,deg", [u.deg, u.deg, u.deg, u.deg], None):
with pytest.raises(ValueError) as err:
SkyCoord(1, 2, unit=unit)
assert "Unit keyword must have one to three unit values" in str(err.value)
for unit in ("m", (u.m, u.deg), ""):
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, unit=unit)
def test_coord_init_list():
"""
List input coordinates, including mixed string, tuple, and Quantity elements.
"""
sc = SkyCoord(
[("1d", "2d"), (1 * u.deg, 2 * u.deg), "1d 2d", ("1°", "2°"), "1° 2°"],
unit="deg",
)
assert allclose(sc.ra, Angle("1d"))
assert allclose(sc.dec, Angle("2d"))
with pytest.raises(ValueError) as err:
SkyCoord(["1d 2d 3d"])
assert "Cannot parse first argument data" in str(err.value)
with pytest.raises(ValueError) as err:
SkyCoord([("1d", "2d", "3d")])
assert "Cannot parse first argument data" in str(err.value)
sc = SkyCoord([1 * u.deg, 1 * u.deg], [2 * u.deg, 2 * u.deg])
assert allclose(sc.ra, Angle("1d"))
assert allclose(sc.dec, Angle("2d"))
with pytest.raises(
ValueError,
match="One or more elements of input sequence does not have a length",
):
SkyCoord([1 * u.deg, 2 * u.deg]) # this list is taken as RA w/ missing dec
def test_coord_init_array():
"""
Input in the form of a list of strings, a nested list, or a numpy array
"""
for a in (["1 2", "3 4"], [["1", "2"], ["3", "4"]], [[1, 2], [3, 4]]):
sc = SkyCoord(a, unit="deg")
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
sc = SkyCoord(np.array(a), unit="deg")
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
def test_coord_init_representation():
"""
Spherical or Cartesian representation input coordinates.
"""
coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc)
sc = SkyCoord(coord, frame="icrs")
assert allclose(sc.ra, coord.lon)
assert allclose(sc.dec, coord.lat)
assert allclose(sc.distance, coord.distance)
with pytest.raises(ValueError) as err:
SkyCoord(coord, frame="icrs", ra="1d")
assert "conflicts with keyword argument 'ra'" in str(err.value)
coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one)
sc = SkyCoord(coord, frame="icrs")
sc_cart = sc.represent_as(CartesianRepresentation)
assert allclose(sc_cart.x, 1.0)
assert allclose(sc_cart.y, 2.0)
assert allclose(sc_cart.z, 3.0)
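def test_coord_init_representation_units_example():
    # Illustrative sketch (not in the original test suite): Cartesian input
    # carrying length units round-trips through the default spherical
    # representation, including the distance.
    coord = CartesianRepresentation(1 * u.kpc, 0 * u.kpc, 0 * u.kpc)
    sc = SkyCoord(coord, frame="icrs")
    assert allclose(sc.ra, 0 * u.deg)
    assert allclose(sc.dec, 0 * u.deg)
    assert allclose(sc.distance, 1 * u.kpc)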
def test_frame_init():
"""
Different ways of providing the frame.
"""
sc = SkyCoord(RA, DEC, frame="icrs")
assert sc.frame.name == "icrs"
sc = SkyCoord(RA, DEC, frame=ICRS)
assert sc.frame.name == "icrs"
sc = SkyCoord(sc)
assert sc.frame.name == "icrs"
sc = SkyCoord(C_ICRS)
assert sc.frame.name == "icrs"
SkyCoord(C_ICRS, frame="icrs")
assert sc.frame.name == "icrs"
with pytest.raises(ValueError) as err:
SkyCoord(C_ICRS, frame="galactic")
assert "Cannot override frame=" in str(err.value)
def test_equal():
obstime = "B1955"
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = SkyCoord([1, 20] * u.deg, [3, 4] * u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 2] * u.km / u.s)
sc2 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 20] * u.km / u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
def test_equal_different_type():
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime="B1955")
# Test equals and not equals operators against different types
assert sc1 != "a string"
assert not (sc1 == "a string")
def test_equal_exceptions():
sc1 = SkyCoord(1 * u.deg, 2 * u.deg, obstime="B1955")
sc2 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(
ValueError,
match=(
"cannot compare: extra frame attribute 'obstime' is not equivalent"
r" \(perhaps compare the frames directly to avoid this exception\)"
),
):
sc1 == sc2
# Note that this exception is the only one raised directly in SkyCoord.
# All others come from lower-level classes and are tested in test_frames.py.
def test_attr_inheritance():
"""
When initializing from an existing coord the representation attrs like
equinox should be inherited to the SkyCoord. If there is a conflict
then raise an exception.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # Doesn't have equinox there so we get FK4 defaults
assert sc2.equinox != sc.equinox
assert sc2.obstime != sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # sc.frame has equinox, obstime
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
@pytest.mark.parametrize("frame", ["fk4", "fk5", "icrs"])
def test_setitem_no_velocity(frame):
"""Test different flavors of item setting for a SkyCoord without a velocity
for different frames. Include a frame attribute that is sometimes an
actual frame attribute and sometimes an extra frame attribute.
"""
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime="B1955", frame=frame)
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg, obstime="B1955", frame=frame)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == Time("B1955")
assert sc1.frame.name == frame
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
def test_setitem_initially_broadcast():
sc = SkyCoord(np.ones((2, 1)) * u.deg, np.ones((1, 3)) * u.deg)
sc[1, 1] = SkyCoord(0 * u.deg, 0 * u.deg)
expected = np.ones((2, 3)) * u.deg
expected[1, 1] = 0.0
assert np.all(sc.ra == expected)
assert np.all(sc.dec == expected)
def test_setitem_velocities():
"""Test different flavors of item setting for a SkyCoord with a velocity."""
sc0 = SkyCoord(
[1, 2] * u.deg,
[3, 4] * u.deg,
radial_velocity=[1, 2] * u.km / u.s,
obstime="B1950",
frame="fk4",
)
sc2 = SkyCoord(
[10, 20] * u.deg,
[30, 40] * u.deg,
radial_velocity=[10, 20] * u.km / u.s,
obstime="B1950",
frame="fk4",
)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == Time("B1950")
assert sc1.frame.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
class SkyCoordSub(SkyCoord):
pass
obstime = "B1955"
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, frame="fk4")
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg, frame="fk4", obstime=obstime)
sc1 = SkyCoordSub(sc0)
with pytest.raises(
TypeError,
match="an only set from object of same class: SkyCoordSub vs. SkyCoord",
):
sc1[0] = sc2[0]
sc1 = SkyCoord(sc0.ra, sc0.dec, frame="fk4", obstime="B2001")
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1.frame[0] = sc2.frame[0]
sc1 = SkyCoord(sc0.ra[0], sc0.dec[0], frame="fk4", obstime=obstime)
with pytest.raises(
TypeError, match="scalar 'FK4' frame object does not support item assignment"
):
sc1[0] = sc2[0]
# Different differentials
sc1 = SkyCoord(
[1, 2] * u.deg,
[3, 4] * u.deg,
pm_ra_cosdec=[1, 2] * u.mas / u.yr,
pm_dec=[3, 4] * u.mas / u.yr,
)
sc2 = SkyCoord(
[10, 20] * u.deg, [30, 40] * u.deg, radial_velocity=[10, 20] * u.km / u.s
)
with pytest.raises(
TypeError,
match=(
"can only set from object of same class: "
"UnitSphericalCosLatDifferential vs. RadialDifferential"
),
):
sc1[0] = sc2[0]
def test_insert():
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc1 = SkyCoord(5 * u.deg, 6 * u.deg)
sc3 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
sc5 = SkyCoord([[10, 2], [30, 4]] * u.deg, [[50, 6], [70, 8]] * u.deg)
# Insert a scalar
sc = sc0.insert(1, sc1)
assert skycoord_equal(sc, SkyCoord([1, 5, 2] * u.deg, [3, 6, 4] * u.deg))
# Insert length=2 array at start of array
sc = sc0.insert(0, sc3)
assert skycoord_equal(sc, SkyCoord([10, 20, 1, 2] * u.deg, [30, 40, 3, 4] * u.deg))
# Insert length=2 array at end of array
sc = sc0.insert(2, sc3)
assert skycoord_equal(sc, SkyCoord([1, 2, 10, 20] * u.deg, [3, 4, 30, 40] * u.deg))
# Multidimensional
sc = sc4.insert(1, sc5)
assert skycoord_equal(
sc,
SkyCoord(
[[1, 2], [10, 2], [30, 4], [3, 4]] * u.deg,
[[5, 6], [50, 6], [70, 8], [7, 8]] * u.deg,
),
)
def test_insert_exceptions():
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc1 = SkyCoord(5 * u.deg, 6 * u.deg)
# sc3 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
with pytest.raises(TypeError, match="cannot insert into scalar"):
sc1.insert(0, sc0)
with pytest.raises(ValueError, match="axis must be 0"):
sc0.insert(0, sc1, axis=1)
with pytest.raises(TypeError, match="obj arg must be an integer"):
sc0.insert(slice(None), sc0)
with pytest.raises(
IndexError, match="index -100 is out of bounds for axis 0 with size 2"
):
sc0.insert(-100, sc0)
# Bad shape
with pytest.raises(
ValueError,
match=r"could not broadcast input array from shape \(2,2\) into shape \(2,?\)",
):
sc0.insert(0, sc4)
def test_attr_conflicts():
"""
Check conflict resolution between coordinate attributes and init kwargs.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox="J1999", obstime="J2001")
# OK because sc.frame doesn't have obstime
SkyCoord(sc.frame, equinox="J1999", obstime="J2100")
# Not OK if attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox="J1999", obstime="J2002")
assert "Coordinate attribute 'obstime'=" in str(err.value)
# Same game but with fk4 which has equinox and obstime frame attrs
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox="J1999", obstime="J2001")
# Not OK if SkyCoord attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox="J1999", obstime="J2002")
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
# Not OK because sc.frame has different attrs
with pytest.raises(ValueError) as err:
SkyCoord(sc.frame, equinox="J1999", obstime="J2002")
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
def test_frame_attr_getattr():
"""
When accessing frame attributes like equinox, the value should come
from self.frame when that object has the relevant attribute, otherwise
from self.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
assert sc.equinox == "J1999" # Just the raw value (not validated)
assert sc.obstime == "J2001"
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
assert sc.equinox == Time("J1999") # Coming from the self.frame object
assert sc.obstime == Time("J2001")
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999")
assert sc.equinox == Time("J1999")
assert sc.obstime == Time("J1999")
def test_to_string():
"""
Basic testing of converting SkyCoord to strings. This just tests
for a single input coordinate and a 1-element list. It does not
test the underlying `Angle.to_string` method itself.
"""
coord = "1h2m3s 1d2m3s"
for wrap in (lambda x: x, lambda x: [x]):
sc = SkyCoord(wrap(coord))
assert sc.to_string() == wrap("15.5125 1.03417")
assert sc.to_string("dms") == wrap("15d30m45s 1d02m03s")
assert sc.to_string("hmsdms") == wrap("01h02m03s +01d02m03s")
with_kwargs = sc.to_string("hmsdms", precision=3, pad=True, alwayssign=True)
assert with_kwargs == wrap("+01h02m03.000s +01d02m03.000s")
@pytest.mark.parametrize("cls_other", [SkyCoord, ICRS])
def test_seps(cls_other):
sc1 = SkyCoord(0 * u.deg, 1 * u.deg)
sc2 = cls_other(0 * u.deg, 2 * u.deg)
sep = sc1.separation(sc2)
assert (sep - 1 * u.deg) / u.deg < 1e-10
with pytest.raises(ValueError):
sc1.separation_3d(sc2)
sc3 = SkyCoord(1 * u.deg, 1 * u.deg, distance=1 * u.kpc)
sc4 = cls_other(1 * u.deg, 1 * u.deg, distance=2 * u.kpc)
sep3d = sc3.separation_3d(sc4)
assert sep3d == 1 * u.kpc
def test_repr():
sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame="icrs")
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame="icrs", distance=1 * u.kpc)
assert repr(sc1) == "<SkyCoord (ICRS): (ra, dec) in deg\n (0., 1.)>"
assert (
repr(sc2)
== "<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)\n (1., 1., 1.)>"
)
sc3 = SkyCoord(0.25 * u.deg, [1, 2.5] * u.deg, frame="icrs")
assert repr(sc3).startswith("<SkyCoord (ICRS): (ra, dec) in deg\n")
sc_default = SkyCoord(0 * u.deg, 1 * u.deg)
assert repr(sc_default) == "<SkyCoord (ICRS): (ra, dec) in deg\n (0., 1.)>"
def test_repr_altaz():
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame="icrs", distance=1 * u.kpc)
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time("2005-03-21 00:00:00")
sc4 = sc2.transform_to(AltAz(location=loc, obstime=time))
assert repr(sc4).startswith(
"<SkyCoord (AltAz: obstime=2005-03-21 00:00:00.000, "
"location=(-2309223., -3695529., -4641767.) m, pressure=0.0 hPa, "
"temperature=0.0 deg_C, relative_humidity=0.0, obswl=1.0 micron):"
" (az, alt, distance) in (deg, deg, kpc)\n"
)
def test_ops():
"""
Tests miscellaneous operations like `len`
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg, frame="icrs")
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg, frame="icrs")
sc_empty = SkyCoord([] * u.deg, [] * u.deg, frame="icrs")
assert sc.isscalar
assert not sc_arr.isscalar
assert not sc_empty.isscalar
with pytest.raises(TypeError):
len(sc)
assert len(sc_arr) == 2
assert len(sc_empty) == 0
assert bool(sc)
assert bool(sc_arr)
assert not bool(sc_empty)
assert sc_arr[0].isscalar
assert len(sc_arr[:1]) == 1
# A scalar shouldn't be indexable
with pytest.raises(TypeError):
sc[0:]
# but it should be possible to just get an item
sc_item = sc[()]
assert sc_item.shape == ()
# and to turn it into an array
sc_1d = sc[np.newaxis]
assert sc_1d.shape == (1,)
with pytest.raises(TypeError):
iter(sc)
assert not isiterable(sc)
assert isiterable(sc_arr)
assert isiterable(sc_empty)
it = iter(sc_arr)
assert next(it).dec == sc_arr[0].dec
assert next(it).dec == sc_arr[1].dec
with pytest.raises(StopIteration):
next(it)
def test_none_transform():
"""
Ensure that transforming from a SkyCoord with no frame provided works like
ICRS
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg)
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg)
sc2 = sc.transform_to(ICRS)
assert sc.ra == sc2.ra and sc.dec == sc2.dec
sc5 = sc.transform_to("fk5")
assert sc5.ra == sc2.transform_to("fk5").ra
sc_arr2 = sc_arr.transform_to(ICRS)
sc_arr5 = sc_arr.transform_to("fk5")
npt.assert_array_equal(sc_arr5.ra, sc_arr2.transform_to("fk5").ra)
def test_position_angle():
c1 = SkyCoord(0 * u.deg, 0 * u.deg)
c2 = SkyCoord(1 * u.deg, 0 * u.deg)
assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0 * u.deg)
c3 = SkyCoord(1 * u.deg, 0.1 * u.deg)
assert c1.position_angle(c3) < 90 * u.deg
c4 = SkyCoord(0 * u.deg, 1 * u.deg)
assert_allclose(c1.position_angle(c4), 0 * u.deg)
carr1 = SkyCoord(0 * u.deg, [0, 1, 2] * u.deg)
carr2 = SkyCoord([-1, -2, -3] * u.deg, [0.1, 1.1, 2.1] * u.deg)
res = carr1.position_angle(carr2)
assert res.shape == (3,)
assert np.all(res < 360 * u.degree)
assert np.all(res > 270 * u.degree)
cicrs = SkyCoord(0 * u.deg, 0 * u.deg, frame="icrs")
cfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5")
# because of the frame transform, it's just a *bit* more than 90 degrees
assert cicrs.position_angle(cfk5) > 90.0 * u.deg
assert cicrs.position_angle(cfk5) < 91.0 * u.deg
def test_position_angle_directly():
"""Regression check for #3800: position_angle should accept floats."""
from astropy.coordinates.angle_utilities import position_angle
result = position_angle(10.0, 20.0, 10.0, 20.0)
assert result.unit is u.radian
assert result.value == 0.0
def test_sep_pa_equivalence():
"""Regression check for bug in #5702.
PA and separation from object 1 to 2 should be consistent with those
from 2 to 1
"""
cfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5")
cfk5B1950 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5", equinox="B1950")
# test with both default and explicit equinox #5722 and #3106
sep_forward = cfk5.separation(cfk5B1950)
sep_backward = cfk5B1950.separation(cfk5)
assert sep_forward != 0 and sep_backward != 0
assert_allclose(sep_forward, sep_backward)
posang_forward = cfk5.position_angle(cfk5B1950)
posang_backward = cfk5B1950.position_angle(cfk5)
assert posang_forward != 0 and posang_backward != 0
assert 179 < (posang_forward - posang_backward).wrap_at(360 * u.deg).degree < 181
dcfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5", distance=1 * u.pc)
dcfk5B1950 = SkyCoord(
1 * u.deg, 0 * u.deg, frame="fk5", equinox="B1950", distance=1.0 * u.pc
)
sep3d_forward = dcfk5.separation_3d(dcfk5B1950)
sep3d_backward = dcfk5B1950.separation_3d(dcfk5)
assert sep3d_forward != 0 and sep3d_backward != 0
assert_allclose(sep3d_forward, sep3d_backward)
def test_directional_offset_by():
# Round-trip tests: where is sc2 from sc1?
# Use those offsets from sc1 and verify you get to sc2.
npoints = 7 # How many points when doing vectors of SkyCoords
for sc1 in [
SkyCoord(0 * u.deg, -90 * u.deg), # South pole
SkyCoord(0 * u.deg, 90 * u.deg), # North pole
SkyCoord(1 * u.deg, 2 * u.deg),
SkyCoord(
np.linspace(0, 359, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="fk4",
),
SkyCoord(
np.linspace(359, 0, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="icrs",
),
SkyCoord(
np.linspace(-3, 3, npoints),
np.linspace(-90, 90, npoints),
unit=(u.rad, u.deg),
frame="barycentricmeanecliptic",
),
]:
for sc2 in [
SkyCoord(5 * u.deg, 10 * u.deg),
SkyCoord(
np.linspace(0, 359, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="galactic",
),
]:
# Find the displacement from sc1 to sc2,
posang = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
# then do the offset from sc1 and verify that you are at sc2
sc2a = sc1.directional_offset_by(position_angle=posang, separation=sep)
assert np.max(np.abs(sc2.separation(sc2a).arcsec)) < 1e-3
# Specific test cases
# Go over the North pole a little way, and
# over the South pole a long way, to get to same spot
sc1 = SkyCoord(0 * u.deg, 89 * u.deg)
for posang, sep in [(0 * u.deg, 2 * u.deg), (180 * u.deg, 358 * u.deg)]:
sc2 = sc1.directional_offset_by(posang, sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 89])
# Go twice as far to ensure that dec is actually changing
# and that >360deg is supported
sc2 = sc1.directional_offset_by(posang, 2 * sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 87])
# Verify that a separation of 180 deg in any direction gets to the antipode
# and 360 deg returns to start
sc1 = SkyCoord(10 * u.deg, 47 * u.deg)
for posang in np.linspace(0, 377, npoints):
sc2 = sc1.directional_offset_by(posang, 180 * u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [190, -47])
sc2 = sc1.directional_offset_by(posang, 360 * u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [10, 47])
# Verify that a 90 degree posang, which means East
# corresponds to an increase in RA, by ~separation/cos(dec) and
# a slight convergence to equator
sc1 = SkyCoord(10 * u.deg, 60 * u.deg)
sc2 = sc1.directional_offset_by(90 * u.deg, 1.0 * u.deg)
assert 11.9 < sc2.ra.degree < 12.0
assert 59.9 < sc2.dec.degree < 60.0
def test_table_to_coord():
"""
Checks "end-to-end" use of `Table` with `SkyCoord` - the `Quantity`
initializer is the intermediary that translates the table columns into
something coordinates understands.
(Regression test for #1762 )
"""
from astropy.table import Column, Table
t = Table()
t.add_column(Column(data=[1, 2, 3], name="ra", unit=u.deg))
t.add_column(Column(data=[4, 5, 6], name="dec", unit=u.deg))
c = SkyCoord(t["ra"], t["dec"])
assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg)
assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg)
def assert_quantities_allclose(coord, q1s, attrs):
"""
Compare two tuples of quantities. This assumes that the values in q1 are of
order(1) and uses atol=1e-13, rtol=0. It also asserts that the units of the
two quantities are the *same*, in order to check that the representation
output has the expected units.
"""
q2s = [getattr(coord, attr) for attr in attrs]
assert len(q1s) == len(q2s)
for q1, q2 in zip(q1s, q2s):
assert q1.shape == q2.shape
assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit)
# Sets of inputs corresponding to Galactic frame
base_unit_attr_sets = [
("spherical", u.karcsec, u.karcsec, u.kpc, Latitude, "l", "b", "distance"),
("unitspherical", u.karcsec, u.karcsec, None, Latitude, "l", "b", None),
("physicsspherical", u.karcsec, u.karcsec, u.kpc, Angle, "phi", "theta", "r"),
("cartesian", u.km, u.km, u.km, u.Quantity, "u", "v", "w"),
("cylindrical", u.km, u.karcsec, u.km, Angle, "rho", "phi", "z"),
]
units_attr_sets = []
for base_unit_attr_set in base_unit_attr_sets:
repr_name = base_unit_attr_set[0]
for representation in (repr_name, REPRESENTATION_CLASSES[repr_name]):
for c1, c2, c3 in ((1, 2, 3), ([1], [2], [3])):
for arrayify in True, False:
if arrayify:
c1 = np.array(c1)
c2 = np.array(c2)
c3 = np.array(c3)
units_attr_sets.append(
base_unit_attr_set + (representation, c1, c2, c3)
)
units_attr_args = (
"repr_name",
"unit1",
"unit2",
"unit3",
"cls2",
"attr1",
"attr2",
"attr3",
"representation",
"c1",
"c2",
"c3",
)
@pytest.mark.parametrize(
units_attr_args, [x for x in units_attr_sets if x[0] != "unitspherical"]
)
def test_skycoord_three_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(
c1,
c2,
c3,
unit=(unit1, unit2, unit3),
representation_type=representation,
frame=Galactic,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
sc = SkyCoord(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
1000 * c3 * u.Unit(unit3 / 1000),
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr3: c3}
sc = SkyCoord(
c1,
c2,
unit=(unit1, unit2, unit3),
frame=Galactic,
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr1: c1, attr2: c2, attr3: c3}
sc = SkyCoord(
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
@pytest.mark.parametrize(
units_attr_args,
[x for x in units_attr_sets if x[0] in ("spherical", "unitspherical")],
)
def test_skycoord_spherical_two_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(
c1, c2, unit=(unit1, unit2), frame=Galactic, representation_type=representation
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
sc = SkyCoord(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
kwargs = {attr1: c1, attr2: c2}
sc = SkyCoord(
frame=Galactic,
unit=(unit1, unit2),
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
@pytest.mark.parametrize(
units_attr_args, [x for x in units_attr_sets if x[0] != "unitspherical"]
)
def test_galactic_three_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = Galactic(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
1000 * c3 * u.Unit(unit3 / 1000),
representation_type=representation,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr3: c3 * unit3}
sc = Galactic(c1 * unit1, c2 * unit2, representation_type=representation, **kwargs)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr1: c1 * unit1, attr2: c2 * unit2, attr3: c3 * unit3}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
@pytest.mark.parametrize(
units_attr_args,
[x for x in units_attr_sets if x[0] in ("spherical", "unitspherical")],
)
def test_galactic_spherical_two_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = Galactic(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
representation_type=representation,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
sc = Galactic(c1 * unit1, c2 * unit2, representation_type=representation)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
kwargs = {attr1: c1 * unit1, attr2: c2 * unit2}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
@pytest.mark.parametrize(
("repr_name", "unit1", "unit2", "unit3", "cls2", "attr1", "attr2", "attr3"),
[x for x in base_unit_attr_sets if x[0] != "unitspherical"],
)
def test_skycoord_coordinate_input(
repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3
):
c1, c2, c3 = 1, 2, 3
sc = SkyCoord(
[(c1, c2, c3)],
unit=(unit1, unit2, unit3),
representation_type=repr_name,
frame="galactic",
)
assert_quantities_allclose(
sc, ([c1] * unit1, [c2] * unit2, [c3] * unit3), (attr1, attr2, attr3)
)
c1, c2, c3 = 1 * unit1, 2 * unit2, 3 * unit3
sc = SkyCoord([(c1, c2, c3)], representation_type=repr_name, frame="galactic")
assert_quantities_allclose(
sc, ([1] * unit1, [2] * unit2, [3] * unit3), (attr1, attr2, attr3)
)
def test_skycoord_string_coordinate_input():
sc = SkyCoord("01 02 03 +02 03 04", unit="deg", representation_type="unitspherical")
assert_quantities_allclose(
sc,
(Angle("01:02:03", unit="deg"), Angle("02:03:04", unit="deg")),
("ra", "dec"),
)
sc = SkyCoord(
["01 02 03 +02 03 04"], unit="deg", representation_type="unitspherical"
)
assert_quantities_allclose(
sc,
(Angle(["01:02:03"], unit="deg"), Angle(["02:03:04"], unit="deg")),
("ra", "dec"),
)
def test_units():
sc = SkyCoord(1, 2, 3, unit="m", representation_type="cartesian") # All get meters
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
# All get u.m
sc = SkyCoord(1, 2 * u.km, 3, unit="m", representation_type="cartesian")
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit=u.m, representation_type="cartesian") # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit="m, km, pc", representation_type="cartesian")
assert_quantities_allclose(sc, (1 * u.m, 2 * u.km, 3 * u.pc), ("x", "y", "z"))
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, 3, unit=(u.m, u.m), representation_type="cartesian")
assert "should have matching physical types" in str(err.value)
sc = SkyCoord(1, 2, 3, unit=(u.m, u.km, u.pc), representation_type="cartesian")
assert_quantities_allclose(sc, (1 * u.m, 2 * u.km, 3 * u.pc), ("x", "y", "z"))
@pytest.mark.xfail
def test_units_known_fail():
# should fail but doesn't => corner case oddity
with pytest.raises(u.UnitsError):
SkyCoord(1, 2, 3, unit=u.deg, representation_type="spherical")
def test_nodata_failure():
with pytest.raises(ValueError):
SkyCoord()
@pytest.mark.parametrize(("mode", "origin"), [("wcs", 0), ("all", 0), ("all", 1)])
def test_wcs_methods(mode, origin):
from astropy.utils.data import get_pkg_data_contents
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord
header = get_pkg_data_contents(
"../../wcs/tests/data/maps/1904-66_TAN.hdr", encoding="binary"
)
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89.0 * u.deg, frame="icrs")
xp, yp = ref.to_pixel(wcs, mode=mode, origin=origin)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to("icrs")
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# also try to round-trip with `from_pixel`
scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to(
"icrs"
)
assert_allclose(scnew.ra.degree, ref.ra.degree)
assert_allclose(scnew.dec.degree, ref.dec.degree)
# Also make sure the right type comes out
class SkyCoord2(SkyCoord):
pass
scnew2 = SkyCoord2.from_pixel(xp, yp, wcs, mode=mode, origin=origin)
assert scnew.__class__ is SkyCoord
assert scnew2.__class__ is SkyCoord2
def test_frame_attr_transform_inherit():
"""
Test that frame attributes get inherited as expected during transform.
Driven by #3106.
"""
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK5)
c2 = c.transform_to(FK4)
assert c2.equinox.value == "B1950.000"
assert c2.obstime.value == "B1950.000"
c2 = c.transform_to(FK4(equinox="J1975", obstime="J1980"))
assert c2.equinox.value == "J1975.000"
assert c2.obstime.value == "J1980.000"
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4)
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J2000.000"
assert c2.obstime is None
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, obstime="J1980")
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J2000.000"
assert c2.obstime.value == "J1980.000"
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, equinox="J1975", obstime="J1980")
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J1975.000"
assert c2.obstime.value == "J1980.000"
c2 = c.transform_to(FK5(equinox="J1990"))
assert c2.equinox.value == "J1990.000"
assert c2.obstime.value == "J1980.000"
# The work-around for #5722
c = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5")
c1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", equinox="B1950.000")
c2 = c1.transform_to(c)
assert not c2.is_equivalent_frame(c) # counterintuitive, but documented
assert c2.equinox.value == "B1950.000"
c3 = c1.transform_to(c, merge_attributes=False)
assert c3.equinox.value == "J2000.000"
assert c3.is_equivalent_frame(c)
def test_deepcopy():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
c2 = copy.copy(c1)
c3 = copy.deepcopy(c1)
c4 = SkyCoord(
[1, 2] * u.m,
[2, 3] * u.m,
[3, 4] * u.m,
representation_type="cartesian",
frame="fk5",
obstime="J1999.9",
equinox="J1988.8",
)
c5 = copy.deepcopy(c4)
assert np.all(c5.x == c4.x) # and y and z
assert c5.frame.name == c4.frame.name
assert c5.obstime == c4.obstime
assert c5.equinox == c4.equinox
assert c5.representation_type == c4.representation_type
def test_no_copy():
c1 = SkyCoord(np.arange(10.0) * u.hourangle, np.arange(20.0, 30.0) * u.deg)
c2 = SkyCoord(c1, copy=False)
# Note: c1.ra and c2.ra will *not* share memory, as these are recalculated
# to be in "preferred" units. See discussion in #4883.
assert np.may_share_memory(c1.data.lon, c2.data.lon)
c3 = SkyCoord(c1, copy=True)
assert not np.may_share_memory(c1.data.lon, c3.data.lon)
def test_immutable():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(AttributeError):
c1.ra = 3.0
c1.foo = 42
assert c1.foo == 42
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_search_around():
"""
Test the search_around_* methods
Here we don't actually test the values are right, just that the methods of
SkyCoord work. The accuracy tests are in ``test_matching.py``
"""
from astropy.utils import NumpyRNGContext
with NumpyRNGContext(987654321):
sc1 = SkyCoord(
np.random.rand(20) * 360.0 * u.degree,
(np.random.rand(20) * 180.0 - 90.0) * u.degree,
)
sc2 = SkyCoord(
np.random.rand(100) * 360.0 * u.degree,
(np.random.rand(100) * 180.0 - 90.0) * u.degree,
)
sc1ds = SkyCoord(ra=sc1.ra, dec=sc1.dec, distance=np.random.rand(20) * u.kpc)
sc2ds = SkyCoord(ra=sc2.ra, dec=sc2.dec, distance=np.random.rand(100) * u.kpc)
idx1_sky, idx2_sky, d2d_sky, d3d_sky = sc1.search_around_sky(sc2, 10 * u.deg)
idx1_3d, idx2_3d, d2d_3d, d3d_3d = sc1ds.search_around_3d(sc2ds, 250 * u.pc)
def test_init_with_frame_instance_keyword():
# Frame instance
c1 = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox="J2010"))
assert c1.equinox == Time("J2010")
# Frame instance with data (data gets ignored)
c2 = SkyCoord(
3 * u.deg, 4 * u.deg, frame=FK5(1.0 * u.deg, 2 * u.deg, equinox="J2010")
)
assert c2.equinox == Time("J2010")
assert allclose(c2.ra.degree, 3)
assert allclose(c2.dec.degree, 4)
# SkyCoord instance
c3 = SkyCoord(3 * u.deg, 4 * u.deg, frame=c1)
assert c3.equinox == Time("J2010")
# Check duplicate arguments
with pytest.raises(ValueError) as err:
c = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox="J2010"), equinox="J2001")
assert "Cannot specify frame attribute 'equinox'" in str(err.value)
def test_guess_from_table():
from astropy.table import Column, Table
from astropy.utils import NumpyRNGContext
tab = Table()
with NumpyRNGContext(987654321):
tab.add_column(Column(data=np.random.rand(10), unit="deg", name="RA[J2000]"))
tab.add_column(Column(data=np.random.rand(10), unit="deg", name="DEC[J2000]"))
sc = SkyCoord.guess_from_table(tab)
npt.assert_array_equal(sc.ra.deg, tab["RA[J2000]"])
npt.assert_array_equal(sc.dec.deg, tab["DEC[J2000]"])
# try without units in the table
tab["RA[J2000]"].unit = None
tab["DEC[J2000]"].unit = None
# should fail if not given explicitly
with pytest.raises(u.UnitsError):
sc2 = SkyCoord.guess_from_table(tab)
# but should work if provided
sc2 = SkyCoord.guess_from_table(tab, unit=u.deg)
npt.assert_array_equal(sc2.ra.deg, tab["RA[J2000]"])
npt.assert_array_equal(sc2.dec.deg, tab["DEC[J2000]"])
# should fail if two options are available - ambiguity bad!
tab.add_column(Column(data=np.random.rand(10), name="RA_J1900"))
with pytest.raises(ValueError) as excinfo:
SkyCoord.guess_from_table(tab, unit=u.deg)
assert "J1900" in excinfo.value.args[0] and "J2000" in excinfo.value.args[0]
tab.remove_column("RA_J1900")
tab["RA[J2000]"].unit = u.deg
tab["DEC[J2000]"].unit = u.deg
# but should succeed if the ambiguity can be broken b/c one of the matches
# is the name of a different component
tab.add_column(Column(data=np.random.rand(10) * u.mas / u.yr, name="pm_ra_cosdec"))
tab.add_column(Column(data=np.random.rand(10) * u.mas / u.yr, name="pm_dec"))
sc3 = SkyCoord.guess_from_table(tab)
assert u.allclose(sc3.ra, tab["RA[J2000]"])
assert u.allclose(sc3.dec, tab["DEC[J2000]"])
assert u.allclose(sc3.pm_ra_cosdec, tab["pm_ra_cosdec"])
assert u.allclose(sc3.pm_dec, tab["pm_dec"])
# should fail if stuff doesn't have proper units
tab["RA[J2000]"].unit = None
tab["DEC[J2000]"].unit = None
with pytest.raises(u.UnitTypeError, match="no unit was given."):
SkyCoord.guess_from_table(tab)
tab.remove_column("pm_ra_cosdec")
tab.remove_column("pm_dec")
# should also fail if user specifies something already in the table, but
# should succeed even if the user has to give one of the components
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tab, ra=tab["RA[J2000]"], unit=u.deg)
oldra = tab["RA[J2000]"]
tab.remove_column("RA[J2000]")
sc3 = SkyCoord.guess_from_table(tab, ra=oldra, unit=u.deg)
npt.assert_array_equal(sc3.ra.deg, oldra)
npt.assert_array_equal(sc3.dec.deg, tab["DEC[J2000]"])
# check a few non-ICRS/spherical systems
x, y, z = np.arange(3).reshape(3, 1) * u.pc
l, b = np.arange(2).reshape(2, 1) * u.deg
tabcart = Table([x, y, z], names=("x", "y", "z"))
tabgal = Table([b, l], names=("b", "l"))
sc_cart = SkyCoord.guess_from_table(tabcart, representation_type="cartesian")
npt.assert_array_equal(sc_cart.x, x)
npt.assert_array_equal(sc_cart.y, y)
npt.assert_array_equal(sc_cart.z, z)
sc_gal = SkyCoord.guess_from_table(tabgal, frame="galactic")
npt.assert_array_equal(sc_gal.l, l)
npt.assert_array_equal(sc_gal.b, b)
# also try some column names that *end* with the attribute name
tabgal["b"].name = "gal_b"
tabgal["l"].name = "gal_l"
SkyCoord.guess_from_table(tabgal, frame="galactic")
tabgal["gal_b"].name = "blob"
tabgal["gal_l"].name = "central"
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tabgal, frame="galactic")
def test_skycoord_list_creation():
"""
Test that SkyCoord can be created in a reasonable way with lists of SkyCoords
(regression for #2702)
"""
sc = SkyCoord(ra=[1, 2, 3] * u.deg, dec=[4, 5, 6] * u.deg)
sc0 = sc[0]
sc2 = sc[2]
scnew = SkyCoord([sc0, sc2])
assert np.all(scnew.ra == [1, 3] * u.deg)
assert np.all(scnew.dec == [4, 6] * u.deg)
# also check ranges
sc01 = sc[:2]
scnew2 = SkyCoord([sc01, sc2])
assert np.all(scnew2.ra == sc.ra)
assert np.all(scnew2.dec == sc.dec)
# now try with a mix of skycoord, frame, and repr objects
frobj = ICRS(2 * u.deg, 5 * u.deg)
reprobj = UnitSphericalRepresentation(3 * u.deg, 6 * u.deg)
scnew3 = SkyCoord([sc0, frobj, reprobj])
assert np.all(scnew3.ra == sc.ra)
assert np.all(scnew3.dec == sc.dec)
# should *fail* if different frame attributes or types are passed in
scfk5_j2000 = SkyCoord(1 * u.deg, 4 * u.deg, frame="fk5")
with pytest.raises(ValueError):
SkyCoord([sc0, scfk5_j2000])
scfk5_j2010 = SkyCoord(1 * u.deg, 4 * u.deg, frame="fk5", equinox="J2010")
with pytest.raises(ValueError):
SkyCoord([scfk5_j2000, scfk5_j2010])
# but they should inherit if they're all consistent
scfk5_2_j2010 = SkyCoord(2 * u.deg, 5 * u.deg, frame="fk5", equinox="J2010")
scfk5_3_j2010 = SkyCoord(3 * u.deg, 6 * u.deg, frame="fk5", equinox="J2010")
scnew4 = SkyCoord([scfk5_j2010, scfk5_2_j2010, scfk5_3_j2010])
assert np.all(scnew4.ra == sc.ra)
assert np.all(scnew4.dec == sc.dec)
assert scnew4.equinox == Time("J2010")
def test_nd_skycoord_to_string():
c = SkyCoord(np.ones((2, 2)), 1, unit=("deg", "deg"))
ts = c.to_string()
assert np.all(ts.shape == c.shape)
assert np.all(ts == "1 1")
def test_equiv_skycoord():
sci1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
sci2 = SkyCoord(1 * u.deg, 3 * u.deg, frame="icrs")
assert sci1.is_equivalent_frame(sci1)
assert sci1.is_equivalent_frame(sci2)
assert sci1.is_equivalent_frame(ICRS())
assert not sci1.is_equivalent_frame(FK5())
with pytest.raises(TypeError):
sci1.is_equivalent_frame(10)
scf1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5")
scf2 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", equinox="J2005")
# obstime is *not* an FK5 attribute, but we still want scf1 and scf3 to
# come out different because they're part of SkyCoord
scf3 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", obstime="J2005")
assert scf1.is_equivalent_frame(scf1)
assert not scf1.is_equivalent_frame(sci1)
assert scf1.is_equivalent_frame(FK5())
assert not scf1.is_equivalent_frame(scf2)
assert scf2.is_equivalent_frame(FK5(equinox="J2005"))
assert not scf3.is_equivalent_frame(scf1)
assert not scf3.is_equivalent_frame(FK5(equinox="J2005"))
def test_equiv_skycoord_with_extra_attrs():
"""Regression test for #10658."""
# GCRS has a CartesianRepresentationAttribute called obsgeoloc
gcrs = GCRS(
1 * u.deg, 2 * u.deg, obsgeoloc=CartesianRepresentation([1, 2, 3], unit=u.m)
)
# Create a SkyCoord where obsgeoloc tags along as an extra attribute
sc1 = SkyCoord(gcrs).transform_to(ICRS)
# Now create a SkyCoord with an equivalent frame but without the extra attribute
sc2 = SkyCoord(sc1.frame)
# The SkyCoords are therefore not equivalent, but check both directions
assert not sc1.is_equivalent_frame(sc2)
# This way around raised a TypeError which is fixed by #10658
assert not sc2.is_equivalent_frame(sc1)
def test_constellations():
# the actual test for accuracy is in test_funcs - this is just meant to make
# sure we get sensible answers
sc = SkyCoord(135 * u.deg, 65 * u.deg)
assert sc.get_constellation() == "Ursa Major"
assert sc.get_constellation(short_name=True) == "UMa"
scs = SkyCoord([135] * 2 * u.deg, [65] * 2 * u.deg)
npt.assert_equal(scs.get_constellation(), ["Ursa Major"] * 2)
npt.assert_equal(scs.get_constellation(short_name=True), ["UMa"] * 2)
@pytest.mark.remote_data
def test_constellations_with_nameresolve():
assert SkyCoord.from_name("And I").get_constellation(short_name=True) == "And"
# you'd think "And ..." should be in Andromeda. But you'd be wrong.
assert SkyCoord.from_name("And VI").get_constellation() == "Pegasus"
# maybe it's because And VI isn't really a galaxy?
assert SkyCoord.from_name("And XXII").get_constellation() == "Pisces"
assert SkyCoord.from_name("And XXX").get_constellation() == "Cassiopeia"
# ok maybe not
# ok, but at least some of the others do make sense...
assert (
SkyCoord.from_name("Coma Cluster").get_constellation(short_name=True) == "Com"
)
assert SkyCoord.from_name("Orion Nebula").get_constellation() == "Orion"
assert SkyCoord.from_name("Triangulum Galaxy").get_constellation() == "Triangulum"
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
sc = SkyCoord([1, 1] * u.deg, [2, 2] * u.deg)
sc.representation_type = "cartesian"
assert sc[0].representation_type is CartesianRepresentation
def test_spherical_offsets_to_api():
i00 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame="icrs")
fk5 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame="fk5")
with pytest.raises(ValueError):
# different frames should fail
i00.spherical_offsets_to(fk5)
i1deg = ICRS(1 * u.deg, 1 * u.deg)
dra, ddec = i00.spherical_offsets_to(i1deg)
assert_allclose(dra, 1 * u.deg)
assert_allclose(ddec, 1 * u.deg)
# make sure an abbreviated array-based version of the above also works
i00s = SkyCoord([0] * 4 * u.arcmin, [0] * 4 * u.arcmin, frame="icrs")
i01s = SkyCoord([0] * 4 * u.arcmin, np.arange(4) * u.arcmin, frame="icrs")
dra, ddec = i00s.spherical_offsets_to(i01s)
assert_allclose(dra, 0 * u.arcmin)
assert_allclose(ddec, np.arange(4) * u.arcmin)
@pytest.mark.parametrize("frame", ["icrs", "galactic"])
@pytest.mark.parametrize(
"comparison_data",
[
(0 * u.arcmin, 1 * u.arcmin),
(1 * u.arcmin, 0 * u.arcmin),
(1 * u.arcmin, 1 * u.arcmin),
],
)
def test_spherical_offsets_roundtrip(frame, comparison_data):
i00 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame=frame)
comparison = SkyCoord(*comparison_data, frame=frame)
dlon, dlat = i00.spherical_offsets_to(comparison)
assert_allclose(dlon, comparison.data.lon)
assert_allclose(dlat, comparison.data.lat)
i00_back = comparison.spherical_offsets_by(-dlon, -dlat)
# This reaches machine precision when only one component is changed, but for
# the third parametrized case (both lon and lat change), the transformation
# will have finite accuracy:
assert_allclose(i00_back.data.lon, i00.data.lon, atol=1e-10 * u.rad)
assert_allclose(i00_back.data.lat, i00.data.lat, atol=1e-10 * u.rad)
# Test roundtripping the other direction:
init_c = SkyCoord(40.0 * u.deg, 40.0 * u.deg, frame=frame)
new_c = init_c.spherical_offsets_by(3.534 * u.deg, 2.2134 * u.deg)
dlon, dlat = new_c.spherical_offsets_to(init_c)
back_c = new_c.spherical_offsets_by(dlon, dlat)
assert init_c.separation(back_c) < 1e-10 * u.deg
def test_frame_attr_changes():
"""
This tests the case where a frame is added with a new frame attribute after
a SkyCoord has been created. This is necessary because SkyCoords get the
attributes set at creation time, but the set of attributes can change as
frames are added or removed from the transform graph. This makes sure that
everything continues to work consistently.
"""
sc_before = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
assert "fakeattr" not in dir(sc_before)
class FakeFrame(BaseCoordinateFrame):
fakeattr = Attribute()
# doesn't matter what this does as long as it just puts the frame in the
# transform graph
transset = (ICRS, FakeFrame, lambda c, f: c)
frame_transform_graph.add_transform(*transset)
try:
assert "fakeattr" in dir(sc_before)
assert sc_before.fakeattr is None
sc_after1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
assert "fakeattr" in dir(sc_after1)
assert sc_after1.fakeattr is None
sc_after2 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs", fakeattr=1)
assert sc_after2.fakeattr == 1
finally:
frame_transform_graph.remove_transform(*transset)
assert "fakeattr" not in dir(sc_before)
assert "fakeattr" not in dir(sc_after1)
assert "fakeattr" not in dir(sc_after2)
def test_cache_clear_sc():
from astropy.coordinates import SkyCoord
i = SkyCoord(1 * u.deg, 2 * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
assert len(i.cache["representation"]) == 2
i.cache.clear()
assert len(i.cache["representation"]) == 0
def test_set_attribute_exceptions():
"""Ensure no attrbute for any frame can be set directly.
Though it is fine if the current frame does not have it."""
sc = SkyCoord(1.0 * u.deg, 2.0 * u.deg, frame="fk5")
assert hasattr(sc.frame, "equinox")
with pytest.raises(AttributeError):
sc.equinox = "B1950"
assert sc.relative_humidity is None
sc.relative_humidity = 0.5
assert sc.relative_humidity == 0.5
assert not hasattr(sc.frame, "relative_humidity")
def test_extra_attributes():
"""Ensure any extra attributes are dealt with correctly.
Regression test against #5743.
"""
obstime_string = ["2017-01-01T00:00", "2017-01-01T00:10"]
obstime = Time(obstime_string)
sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=obstime_string)
assert not hasattr(sc.frame, "obstime")
assert type(sc.obstime) is Time
assert sc.obstime.shape == (2,)
assert np.all(sc.obstime == obstime)
# ensure equivalency still works for more than one obstime.
assert sc.is_equivalent_frame(sc)
sc_1 = sc[1]
assert sc_1.obstime == obstime[1]
# Transforming to FK4 should use sc.obstime.
sc_fk4 = sc.transform_to("fk4")
assert np.all(sc_fk4.frame.obstime == obstime)
# And transforming back should not lose it.
sc2 = sc_fk4.transform_to("icrs")
assert not hasattr(sc2.frame, "obstime")
assert np.all(sc2.obstime == obstime)
# Ensure obstime gets taken from the SkyCoord if passed in directly.
# (regression test for #5749).
sc3 = SkyCoord([0.0, 1.0], [2.0, 3.0], unit="deg", frame=sc)
assert np.all(sc3.obstime == obstime)
# Finally, check that we can delete such attributes.
del sc3.obstime
assert sc3.obstime is None
def test_apply_space_motion():
# use this 12 year period because it's a multiple of 4 to avoid the quirks
# of leap years while having 2 leap seconds in it
t1 = Time("2000-01-01T00:00")
t2 = Time("2012-01-01T00:00")
# Check a very simple case first:
frame = ICRS(
ra=10.0 * u.deg,
dec=0 * u.deg,
distance=10.0 * u.pc,
pm_ra_cosdec=0.1 * u.deg / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=0 * u.km / u.s,
)
# Cases that should work (just testing input for now):
c1 = SkyCoord(frame, obstime=t1, pressure=101 * u.kPa)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied1 = c1.apply_space_motion(new_obstime=t2)
applied2 = c1.apply_space_motion(dt=12 * u.year)
assert isinstance(applied1.frame, c1.frame.__class__)
assert isinstance(applied2.frame, c1.frame.__class__)
assert_allclose(applied1.ra, applied2.ra)
assert_allclose(applied1.pm_ra_cosdec, applied2.pm_ra_cosdec)
assert_allclose(applied1.dec, applied2.dec)
assert_allclose(applied1.distance, applied2.distance)
# ensure any frame attributes that were there before get passed through
assert applied1.pressure == c1.pressure
# there were 2 leap seconds between 2000 and 2010, so the difference in
# the two forms of time evolution should be ~2 sec
adt = np.abs(applied2.obstime - applied1.obstime)
assert 1.9 * u.second < adt.to(u.second) < 2.1 * u.second
c2 = SkyCoord(frame)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied3 = c2.apply_space_motion(dt=6 * u.year)
assert isinstance(applied3.frame, c1.frame.__class__)
assert applied3.obstime is None
# this should *not* be .6 deg due to space-motion on a sphere, but it
# should be fairly close
assert 0.5 * u.deg < applied3.ra - c1.ra < 0.7 * u.deg
# the two cases should only match somewhat due to it being space motion, but
# they should be at least this close
assert quantity_allclose(
applied1.ra - c1.ra, (applied3.ra - c1.ra) * 2, atol=1e-3 * u.deg
)
# but *not* this close
assert not quantity_allclose(
applied1.ra - c1.ra, (applied3.ra - c1.ra) * 2, atol=1e-4 * u.deg
)
with pytest.raises(ValueError):
c2.apply_space_motion(new_obstime=t2)
def test_custom_frame_skycoord():
# also regression check for the case from #7069
class BlahBleeBlopFrame(BaseCoordinateFrame):
default_representation = SphericalRepresentation
# without a differential, SkyCoord creation fails
# default_differential = SphericalDifferential
_frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "lon", "recommended"),
RepresentationMapping("lat", "lat", "recommended"),
RepresentationMapping("distance", "radius", "recommended"),
]
}
SkyCoord(lat=1 * u.deg, lon=2 * u.deg, frame=BlahBleeBlopFrame)
def test_user_friendly_pm_error():
"""
This checks that a more user-friendly error message is raised for the user
if they pass, e.g., pm_ra instead of pm_ra_cosdec
"""
with pytest.raises(ValueError) as e:
SkyCoord(
ra=150 * u.deg,
dec=-11 * u.deg,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
)
assert "pm_ra_cosdec" in str(e.value)
with pytest.raises(ValueError) as e:
SkyCoord(
l=150 * u.deg,
b=-11 * u.deg,
pm_l=100 * u.mas / u.yr,
pm_b=10 * u.mas / u.yr,
frame="galactic",
)
assert "pm_l_cosb" in str(e.value)
# The special error should not turn on here:
with pytest.raises(ValueError) as e:
SkyCoord(
x=1 * u.pc,
y=2 * u.pc,
z=3 * u.pc,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
representation_type="cartesian",
)
assert "pm_ra_cosdec" not in str(e.value)
def test_contained_by():
"""
Test SkyCoord.contained_by(wcs, image).
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
"""
test_wcs = WCS(fits.Header.fromstring(header.strip(), "\n"))
assert SkyCoord(254, 2, unit="deg").contained_by(test_wcs)
assert not SkyCoord(240, 2, unit="deg").contained_by(test_wcs)
img = np.zeros((2136, 2078))
assert SkyCoord(250, 2, unit="deg").contained_by(test_wcs, img)
assert not SkyCoord(240, 2, unit="deg").contained_by(test_wcs, img)
ra = np.array([254.2, 254.1])
dec = np.array([2, 12.1])
coords = SkyCoord(ra, dec, unit="deg")
assert np.all(test_wcs.footprint_contains(coords) == np.array([True, False]))
def test_none_differential_type():
"""
This is a regression test for #8021
"""
from astropy.coordinates import BaseCoordinateFrame
class MockHeliographicStonyhurst(BaseCoordinateFrame):
default_representation = SphericalRepresentation
frame_specific_representation_info = {
SphericalRepresentation: [
RepresentationMapping(
reprname="lon", framename="lon", defaultunit=u.deg
),
RepresentationMapping(
reprname="lat", framename="lat", defaultunit=u.deg
),
RepresentationMapping(
reprname="distance", framename="radius", defaultunit=None
),
]
}
fr = MockHeliographicStonyhurst(lon=1 * u.deg, lat=2 * u.deg, radius=10 * u.au)
SkyCoord(0 * u.deg, fr.lat, fr.radius, frame=fr) # this was the failure
def test_multiple_aliases():
# Define a frame with multiple aliases
class MultipleAliasesFrame(BaseCoordinateFrame):
name = ["alias_1", "alias_2"]
default_representation = SphericalRepresentation
# Register a transform, which adds the aliases to the transform graph
tfun = lambda c, f: f.__class__(lon=c.lon, lat=c.lat)
ftrans = FunctionTransform(
tfun,
MultipleAliasesFrame,
MultipleAliasesFrame,
register_graph=frame_transform_graph,
)
coord = SkyCoord(lon=1 * u.deg, lat=2 * u.deg, frame=MultipleAliasesFrame)
# Test attribute-style access returns self (not a copy)
assert coord.alias_1 is coord
assert coord.alias_2 is coord
# Test for aliases in __dir__()
assert "alias_1" in coord.__dir__()
assert "alias_2" in coord.__dir__()
# Test transform_to() calls
assert isinstance(coord.transform_to("alias_1").frame, MultipleAliasesFrame)
assert isinstance(coord.transform_to("alias_2").frame, MultipleAliasesFrame)
ftrans.unregister(frame_transform_graph)
@pytest.mark.parametrize(
"kwargs, error_message",
[
(
{"ra": 1, "dec": 1, "distance": 1 * u.pc, "unit": "deg"},
r"Unit 'deg' \(angle\) could not be applied to 'distance'. ",
),
(
{
"rho": 1 * u.m,
"phi": 1,
"z": 1 * u.m,
"unit": "deg",
"representation_type": "cylindrical",
},
r"Unit 'deg' \(angle\) could not be applied to 'rho'. ",
),
],
)
def test_passing_inconsistent_coordinates_and_units_raises_helpful_error(
kwargs, error_message
):
# https://github.com/astropy/astropy/issues/10725
with pytest.raises(ValueError, match=error_message):
SkyCoord(**kwargs)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_match_to_catalog_3d_and_sky():
# Test for issue #5857. See PR #11449
cfk5_default = SkyCoord(
[1, 2, 3, 4] * u.degree,
[0, 0, 0, 0] * u.degree,
distance=[1, 1, 1.5, 1] * u.kpc,
frame="fk5",
)
cfk5_J1950 = cfk5_default.transform_to(FK5(equinox="J1950"))
idx, angle, quantity = cfk5_J1950.match_to_catalog_3d(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0 * u.deg, atol=1e-14 * u.deg, rtol=0)
assert_allclose(quantity, 0 * u.kpc, atol=1e-14 * u.kpc, rtol=0)
idx, angle, distance = cfk5_J1950.match_to_catalog_sky(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0 * u.deg, atol=1e-14 * u.deg, rtol=0)
assert_allclose(distance, 0 * u.kpc, atol=1e-14 * u.kpc, rtol=0)
2cfb7a7a2a3b8bf2081d54767f8c94678fa45e694a1cf104e8389683fd3d6435
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Define the Enhanced Character-Separated-Values (ECSV) which allows for reading and
writing all the meta data associated with an astropy Table object.
"""
import json
import re
import warnings
from collections import OrderedDict
import numpy as np
from astropy.io.ascii.core import convert_numpy
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
from astropy.utils.exceptions import AstropyUserWarning
from . import basic, core
ECSV_VERSION = "1.0"
DELIMITERS = (" ", ",")
ECSV_DATATYPES = (
"bool",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"float128",
"string",
) # Raise warning if not one of these standard dtypes
class InvalidEcsvDatatypeWarning(AstropyUserWarning):
"""
ECSV specific Astropy warning class.
"""
class EcsvHeader(basic.BasicHeader):
"""Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""Return only non-blank lines that start with the comment regexp. For these
lines strip out the matching characters and leading/trailing whitespace."""
re_comment = re.compile(self.comment)
for line in lines:
line = line.strip()
if not line:
continue
match = re_comment.match(line)
if match:
out = line[match.end() :]
if out:
yield out
else:
# Stop iterating on first failed match for a non-blank line
return
def write(self, lines):
"""
Write header information in the ECSV ASCII format.
This function is called at the point when preprocessing has been done to
convert the input table columns to `self.cols` which is a list of
`astropy.io.ascii.core.Column` objects. In particular `col.str_vals`
is available for each column with the string representation of each
column item for output.
This format starts with a delimiter separated list of the column names
in order to make this format readable by humans and simple csv-type
readers. It then encodes the full table meta and column attributes and
meta as YAML and pretty-prints this in the header. Finally the
delimited column names are repeated again, for humans and readers that
look for the *last* comment line as defining the column names.
"""
if self.splitter.delimiter not in DELIMITERS:
raise ValueError(
"only space and comma are allowed for delimiter in ECSV format"
)
# Now assemble the header dict that will be serialized by the YAML dumper
header = {"cols": self.cols, "schema": "astropy-2.0"}
if self.table_meta:
header["meta"] = self.table_meta
# Set the delimiter only for the non-default option(s)
if self.splitter.delimiter != " ":
header["delimiter"] = self.splitter.delimiter
header_yaml_lines = [
f"%ECSV {ECSV_VERSION}",
"---",
] + meta.get_yaml_from_header(header)
lines.extend([self.write_comment + line for line in header_yaml_lines])
lines.append(self.splitter.join([x.info.name for x in self.cols]))
def write_comments(self, lines, meta):
"""
WRITE: Override the default write_comments to do nothing since this is handled
in the custom write method.
"""
pass
def update_meta(self, lines, meta):
"""
READ: Override the default update_meta to do nothing. This process is done
in get_cols() for this reader.
"""
pass
def get_cols(self, lines):
"""
READ: Initialize the header Column objects from the table ``lines``.
Parameters
----------
lines : list
List of table lines
"""
# Cache a copy of the original input lines before processing below
raw_lines = lines
# Extract non-blank comment (header) lines with comment character stripped
lines = list(self.process_lines(lines))
# Validate that this is an ECSV file
ecsv_header_re = r"""%ECSV [ ]
(?P<major> \d+)
\. (?P<minor> \d+)
\.? (?P<bugfix> \d+)? $"""
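# Editorial note (not from the original source): after process_lines() has
# stripped the leading comment characters, this regex accepts version lines
# such as "%ECSV 1.0" or "%ECSV 0.9.1".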
no_header_msg = (
'ECSV header line like "# %ECSV <version>" not found as first line.'
" This is required for a ECSV file."
)
if not lines:
raise core.InconsistentTableError(no_header_msg)
match = re.match(ecsv_header_re, lines[0].strip(), re.VERBOSE)
if not match:
raise core.InconsistentTableError(no_header_msg)
try:
header = meta.get_header_from_yaml(lines)
except meta.YamlParseError:
raise core.InconsistentTableError("unable to parse yaml in meta header")
if "meta" in header:
self.table_meta = header["meta"]
if "delimiter" in header:
delimiter = header["delimiter"]
if delimiter not in DELIMITERS:
raise ValueError(
"only space and comma are allowed for delimiter in ECSV format"
)
self.splitter.delimiter = delimiter
self.data.splitter.delimiter = delimiter
# Create the list of io.ascii column objects from `header`
header_cols = OrderedDict((x["name"], x) for x in header["datatype"])
self.names = [x["name"] for x in header["datatype"]]
# Read the first non-commented line of table and split to get the CSV
# header column names. This is essentially what the Basic reader does.
header_line = next(super().process_lines(raw_lines))
header_names = next(self.splitter([header_line]))
# Check for consistency of the ECSV vs. CSV header column names
if header_names != self.names:
raise core.InconsistentTableError(
f"column names from ECSV header {self.names} do not "
f"match names from header line of CSV data {header_names}"
)
# BaseHeader method to create self.cols, which is a list of
# io.ascii.core.Column objects (*not* Table Column objects).
self._set_cols_from_names()
# Transfer attributes from the column descriptor stored in the input
# header YAML metadata to the new columns to create this table.
for col in self.cols:
for attr in ("description", "format", "unit", "meta", "subtype"):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
col.dtype = header_cols[col.name]["datatype"]
# Warn if col dtype is not a valid ECSV datatype, but allow reading for
# back-compatibility with existing older files that have numpy datatypes
# like datetime64 or object or python str, which are not in the ECSV standard.
if col.dtype not in ECSV_DATATYPES:
msg = (
f"unexpected datatype {col.dtype!r} of column {col.name!r} "
f"is not in allowed ECSV datatypes {ECSV_DATATYPES}. "
"Using anyway as a numpy dtype but beware since unexpected "
"results are possible."
)
warnings.warn(msg, category=InvalidEcsvDatatypeWarning)
# Subtype is written like "int64[2,null]" and we want to split this
# out to "int64" and [2, None].
subtype = col.subtype
if subtype and "[" in subtype:
idx = subtype.index("[")
col.subtype = subtype[:idx]
col.shape = json.loads(subtype[idx:])
# Convert ECSV "string" to numpy "str"
for attr in ("dtype", "subtype"):
if getattr(col, attr) == "string":
setattr(col, attr, "str")
# ECSV subtype of 'json' maps to numpy 'object' dtype
if col.subtype == "json":
col.subtype = "object"
def _check_dtype_is_str(col):
if col.dtype != "str":
raise ValueError(f'datatype of column {col.name!r} must be "string"')
class EcsvOutputter(core.TableOutputter):
"""
After reading the input lines and processing, convert the Reader columns
and metadata to an astropy.table.Table object. This overrides the default
converters to be an empty list because there is no "guessing" of the
conversion function.
"""
default_converters = []
def __call__(self, cols, meta):
# Convert to a Table with all plain Column subclass columns
out = super().__call__(cols, meta)
# If mixin columns exist (based on the special '__mixin_columns__'
# key in the table ``meta``), then use that information to construct
# appropriate mixin columns and remove the original data columns.
# If no __mixin_columns__ exists then this function just passes back
# the input table.
out = serialize._construct_mixins_from_columns(out)
return out
def _convert_vals(self, cols):
"""READ: Convert str_vals in `cols` to final arrays with correct dtypes.
This is adapted from ``BaseOutputter._convert_vals``. In the case of ECSV
there is no guessing and all types are known in advance. A big change
is handling the possibility of JSON-encoded values, both unstructured
object data and structured values that may contain masked data.
"""
for col in cols:
try:
# 1-d or N-d object columns are serialized as JSON.
if col.subtype == "object":
_check_dtype_is_str(col)
col_vals = [json.loads(val) for val in col.str_vals]
col.data = np.empty([len(col_vals)] + col.shape, dtype=object)
col.data[...] = col_vals
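                    # Illustrative example: a cell stored as the JSON text
                    # '{"a": 1}' is loaded back as the Python dict {'a': 1}
                    # and kept in an object-dtype array.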
# Variable length arrays with shape (n, m, ..., *) for fixed
# n, m, .. and variable in last axis. Masked values here are
# not currently supported.
elif col.shape and col.shape[-1] is None:
_check_dtype_is_str(col)
# Empty (blank) values in original ECSV are changed to "0"
# in str_vals with corresponding col.mask being created and
# set accordingly. Instead use an empty list here.
if hasattr(col, "mask"):
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = "[]"
# Remake as a 1-d object column of numpy ndarrays or
# MaskedArray using the datatype specified in the ECSV file.
col_vals = []
for str_val in col.str_vals:
obj_val = json.loads(str_val) # list or nested lists
try:
arr_val = np.array(obj_val, dtype=col.subtype)
except TypeError:
# obj_val has entries that are inconsistent with
# dtype. For a valid ECSV file the only possibility
# is None values (indicating missing values).
data = np.array(obj_val, dtype=object)
# Replace all the None with an appropriate fill value
mask = data == None # noqa: E711
kind = np.dtype(col.subtype).kind
data[mask] = {"U": "", "S": b""}.get(kind, 0)
arr_val = np.ma.array(data.astype(col.subtype), mask=mask)
col_vals.append(arr_val)
col.shape = ()
col.dtype = np.dtype(object)
                # np.array(col_vals, dtype=object) can fail here, so use this workaround:
col.data = np.empty(len(col_vals), dtype=object)
col.data[:] = col_vals
# Multidim columns with consistent shape (n, m, ...). These
# might be masked.
elif col.shape:
_check_dtype_is_str(col)
# Change empty (blank) values in original ECSV to something
# like "[[null, null],[null,null]]" so subsequent JSON
# decoding works. Delete `col.mask` so that later code in
# core TableOutputter.__call__() that deals with col.mask
# does not run (since handling is done here already).
if hasattr(col, "mask"):
all_none_arr = np.full(
shape=col.shape, fill_value=None, dtype=object
)
all_none_json = json.dumps(all_none_arr.tolist())
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = all_none_json
del col.mask
col_vals = [json.loads(val) for val in col.str_vals]
# Make a numpy object array of col_vals to look for None
# (masked values)
data = np.array(col_vals, dtype=object)
mask = data == None # noqa: E711
if not np.any(mask):
# No None's, just convert to required dtype
col.data = data.astype(col.subtype)
else:
# Replace all the None with an appropriate fill value
kind = np.dtype(col.subtype).kind
data[mask] = {"U": "", "S": b""}.get(kind, 0)
# Finally make a MaskedArray with the filled data + mask
col.data = np.ma.array(data.astype(col.subtype), mask=mask)
# Regular scalar value column
else:
if col.subtype:
warnings.warn(
f"unexpected subtype {col.subtype!r} set for column "
f"{col.name!r}, using dtype={col.dtype!r} instead.",
category=InvalidEcsvDatatypeWarning,
)
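                    # convert_numpy() returns a (converter, type) pair; the
                    # converter turns the list of string values into a numpy
                    # array of the declared ECSV datatype.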
converter_func, _ = convert_numpy(col.dtype)
col.data = converter_func(col.str_vals)
if col.data.shape[1:] != tuple(col.shape):
raise ValueError(
"shape mismatch between value and column specifier"
)
except json.JSONDecodeError:
raise ValueError(
f"column {col.name!r} failed to convert: "
"column value is not valid JSON"
)
except Exception as exc:
raise ValueError(f"column {col.name!r} failed to convert: {exc}")
class EcsvData(basic.BasicData):
def _set_fill_values(self, cols):
"""READ: Set the fill values of the individual cols based on fill_values of BaseData
For ECSV handle the corner case of data that has been serialized using
the serialize_method='data_mask' option, which writes the full data and
mask directly, AND where that table includes a string column with zero-length
string entries ("") which are valid data.
Normally the super() method will set col.fill_value=('', '0') to replace
blanks with a '0'. But for that corner case subset, instead do not do
any filling.
"""
super()._set_fill_values(cols)
# Get the serialized columns spec. It might not exist and there might
# not even be any table meta, so punt in those cases.
try:
scs = self.header.table_meta["__serialized_columns__"]
except (AttributeError, KeyError):
return
# Got some serialized columns, so check for string type and serialized
# as a MaskedColumn. Without 'data_mask', MaskedColumn objects are
# stored to ECSV as normal columns.
for col in cols:
if (
col.dtype == "str"
and col.name in scs
and scs[col.name]["__class__"] == "astropy.table.column.MaskedColumn"
):
col.fill_values = {} # No data value replacement
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings
This version considerably simplifies the base method:
- No need to set fill values and column formats
- No per-item formatting, just use repr()
- Use JSON for object-type or multidim values
- Only Column or MaskedColumn can end up as cols here.
- Only replace masked values with "", not the generalized filling
"""
for col in self.cols:
if len(col.shape) > 1 or col.info.dtype.kind == "O":
def format_col_item(idx):
obj = col[idx]
try:
obj = obj.tolist()
except AttributeError:
pass
return json.dumps(obj, separators=(",", ":"))
else:
def format_col_item(idx):
return str(col[idx])
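            # Illustrative example: a 2x2 integer cell such as [[1, 2], [3, 4]]
            # is written as the compact JSON string "[[1,2],[3,4]]"; masked
            # entries in multidimensional cells become JSON null via tolist().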
try:
col.str_vals = [format_col_item(idx) for idx in range(len(col))]
except TypeError as exc:
raise TypeError(
f"could not convert column {col.info.name!r} to string: {exc}"
) from exc
# Replace every masked value in a 1-d column with an empty string.
# For multi-dim columns this gets done by JSON via "null".
if hasattr(col, "mask") and col.ndim == 1:
for idx in col.mask.nonzero()[0]:
col.str_vals[idx] = ""
out = [col.str_vals for col in self.cols]
return out
class Ecsv(basic.Basic):
"""ECSV (Enhanced Character Separated Values) format table.
    The ECSV format allows for specification of key table and column meta-data, in
particular the data type and unit.
See: https://github.com/astropy/astropy-APEs/blob/main/APE6.rst
Examples
--------
>>> from astropy.table import Table
>>> ecsv_content = '''# %ECSV 0.9
... # ---
... # datatype:
... # - {name: a, unit: m / s, datatype: int64, format: '%03d'}
... # - {name: b, unit: km, datatype: int64, description: This is column b}
... a b
... 001 2
... 004 3
... '''
>>> Table.read(ecsv_content, format='ascii.ecsv')
<Table length=2>
a b
m / s km
int64 int64
----- -----
001 2
004 3
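
    A table can be written back out in the same format (an illustrative
    sketch; the file name is arbitrary and the exact header emitted depends
    on the astropy version)::

        t = Table.read(ecsv_content, format='ascii.ecsv')
        t.write('example.ecsv', format='ascii.ecsv', overwrite=True)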
"""
_format_name = "ecsv"
_description = "Enhanced CSV"
_io_registry_suffix = ".ecsv"
header_class = EcsvHeader
data_class = EcsvData
outputter_class = EcsvOutputter
max_ndim = None # No limit on column dimensionality
def update_table_data(self, table):
"""
Update table columns in place if mixin columns are present.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
with serialize_context_as("ecsv"):
out = serialize.represent_mixins_as_columns(table)
return out
|
e3e509f81f2537a2acb8cac6f4e42b45afb957facb7ba2cf2eb3154ec944e999 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
.. _warnings:
Warnings
--------
.. note::
Most of the following warnings indicate violations of the VOTable
specification. They should be reported to the authors of the
tools that produced the VOTable file.
To control the warnings emitted, use the standard Python
:mod:`warnings` module and the ``astropy.io.votable.exceptions.conf.max_warnings``
configuration item. Most of these are of the type `VOTableSpecWarning`.
{warnings}
.. _exceptions:
Exceptions
----------
.. note::
This is a list of many of the fatal exceptions emitted by ``astropy.io.votable``
when the file does not conform to spec. Other exceptions may be
raised due to unforeseen cases or bugs in ``astropy.io.votable`` itself.
{exceptions}
"""
# STDLIB
import io
import re
from textwrap import dedent
from warnings import warn
from astropy import config as _config
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"Conf",
"conf",
"warn_or_raise",
"vo_raise",
"vo_reraise",
"vo_warn",
"warn_unknown_attrs",
"parse_vowarning",
"VOWarning",
"VOTableChangeWarning",
"VOTableSpecWarning",
"UnimplementedWarning",
"IOWarning",
"VOTableSpecError",
]
# NOTE: Cannot put this in __init__.py due to circular import.
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.votable.exceptions`.
"""
max_warnings = _config.ConfigItem(
10,
"Number of times the same type of warning is displayed before being suppressed",
cfgtype="integer",
)
conf = Conf()
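# Illustrative usage (example values only): the threshold can be raised for a
# particularly noisy file before parsing, e.g.
#
#     from astropy.io.votable.exceptions import conf
#     conf.max_warnings = 50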
def _format_message(message, name, config=None, pos=None):
if config is None:
config = {}
if pos is None:
pos = ("?", "?")
filename = config.get("filename", "?")
return f"{filename}:{pos[0]}:{pos[1]}: {name}: {message}"
def _suppressed_warning(warning, config, stacklevel=2):
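    # Count emissions of each warning class in the per-parse ``config`` dict and
    # stop re-emitting once ``conf.max_warnings`` is reached; the last warning
    # that is shown gets a note that further ones of this type are suppressed.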
warning_class = type(warning)
config.setdefault("_warning_counts", dict()).setdefault(warning_class, 0)
config["_warning_counts"][warning_class] += 1
message_count = config["_warning_counts"][warning_class]
if message_count <= conf.max_warnings:
if message_count == conf.max_warnings:
warning.formatted_message += (
" (suppressing further warnings of this type...)"
)
warn(warning, stacklevel=stacklevel + 1)
def warn_or_raise(
warning_class, exception_class=None, args=(), config=None, pos=None, stacklevel=1
):
"""
Warn or raise an exception, depending on the verify setting.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
config_value = config.get("verify", "warn")
if config_value == "exception":
if exception_class is None:
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
elif config_value == "warn":
vo_warn(warning_class, args, config, pos, stacklevel=stacklevel + 1)
def vo_raise(exception_class, args=(), config=None, pos=None):
"""
Raise an exception, with proper position information if available.
"""
if config is None:
config = {}
raise exception_class(args, config, pos)
def vo_reraise(exc, config=None, pos=None, additional=""):
"""
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code.
"""
if config is None:
config = {}
message = _format_message(str(exc), exc.__class__.__name__, config, pos)
if message.split()[0] == str(exc).split()[0]:
message = str(exc)
if len(additional):
message += " " + additional
exc.args = (message,)
raise exc
def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1):
"""
Warn, with proper position information if available.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
    # one would expect that calling vo_warn without config should not
# silence the warnings.
if config.get("verify", "warn") != "ignore":
warning = warning_class(args, config, pos)
_suppressed_warning(warning, config, stacklevel=stacklevel + 1)
def warn_unknown_attrs(element, attrs, config, pos, good_attr=[], stacklevel=1):
for attr in attrs:
if attr not in good_attr:
vo_warn(W48, (attr, element), config, pos, stacklevel=stacklevel + 1)
_warning_pat = re.compile(
r":?(?P<nline>[0-9?]+):(?P<nchar>[0-9?]+): "
+ r"((?P<warning>[WE]\d+): )?(?P<rest>.*)$"
)
def parse_vowarning(line):
"""
Parses the vo warning string back into its parts.
"""
result = {}
match = _warning_pat.search(line)
if match:
result["warning"] = warning = match.group("warning")
if warning is not None:
result["is_warning"] = warning[0].upper() == "W"
result["is_exception"] = not result["is_warning"]
result["number"] = int(match.group("warning")[1:])
result["doc_url"] = f"io/votable/api_exceptions.html#{warning.lower()}"
else:
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = True
result["number"] = None
result["doc_url"] = None
try:
result["nline"] = int(match.group("nline"))
except ValueError:
result["nline"] = 0
try:
result["nchar"] = int(match.group("nchar"))
except ValueError:
result["nchar"] = 0
result["message"] = match.group("rest")
result["is_something"] = True
else:
result["warning"] = None
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = False
result["is_something"] = False
if not isinstance(line, str):
line = line.decode("utf-8")
result["message"] = line
return result
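# Illustrative example: for a line such as
#   "votable.xml:13:0: W41: An XML namespace is specified, but is incorrect."
# parse_vowarning() returns a dict with warning='W41', is_warning=True,
# number=41, nline=13, nchar=0 and message holding the text after the code.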
class VOWarning(AstropyWarning):
"""
The base class of all VO warnings and exceptions.
Handles the formatting of the message with a warning or exception
code, filename, line and column number.
"""
default_args = ()
message_template = ""
def __init__(self, args, config=None, pos=None):
if config is None:
config = {}
if not isinstance(args, tuple):
args = (args,)
msg = self.message_template.format(*args)
self.formatted_message = _format_message(
msg, self.__class__.__name__, config, pos
)
Warning.__init__(self, self.formatted_message)
def __str__(self):
return self.formatted_message
@classmethod
def get_short_name(cls):
if len(cls.default_args):
return cls.message_template.format(*cls.default_args)
return cls.message_template
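# Illustrative example (argument values chosen for demonstration only):
# W02(("ID", "34x"), {}, (10, 4)) formats its message as
#   "?:10:4: W02: ID attribute '34x' is invalid. Must be a standard XML id"
# where "?" stands in for the unknown filename.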
class VOTableChangeWarning(VOWarning, SyntaxWarning):
"""
A change has been made to the input XML file.
"""
class VOTableSpecWarning(VOWarning, SyntaxWarning):
"""
The input XML file violates the spec, but there is an obvious workaround.
"""
class UnimplementedWarning(VOWarning, SyntaxWarning):
"""
A feature of the VOTABLE_ spec is not implemented.
"""
class IOWarning(VOWarning, RuntimeWarning):
"""
A network or IO error occurred, but was recovered using the cache.
"""
class VOTableSpecError(VOWarning, ValueError):
"""
The input XML file violates the spec and there is no good workaround.
"""
class W01(VOTableSpecWarning):
"""
The VOTable spec states:
If a cell contains an array or complex number, it should be
encoded as multiple numbers separated by whitespace.
Many VOTable files in the wild use commas as a separator instead,
and ``astropy.io.votable`` can support this convention depending on the
:ref:`astropy:verifying-votables` setting.
``astropy.io.votable`` always outputs files using only spaces, regardless of
how they were input.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#toc-header-35>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:TABLEDATA>`__
"""
message_template = "Array uses commas rather than whitespace"
class W02(VOTableSpecWarning):
r"""
XML ids must match the following regular expression::
^[A-Za-z_][A-Za-z0-9_\.\-]*$
The VOTable 1.1 says the following:
According to the XML standard, the attribute ``ID`` is a
string beginning with a letter or underscore (``_``), followed
by a sequence of letters, digits, or any of the punctuation
characters ``.`` (dot), ``-`` (dash), ``_`` (underscore), or
``:`` (colon).
However, this is in conflict with the XML standard, which says
colons may not be used. VOTable 1.1's own schema does not allow a
colon here. Therefore, ``astropy.io.votable`` disallows the colon.
VOTable 1.2 corrects this error in the specification.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`XML Names <http://www.w3.org/TR/REC-xml/#NT-Name>`__
"""
message_template = "{} attribute '{}' is invalid. Must be a standard XML id"
default_args = ("x", "y")
class W03(VOTableChangeWarning):
"""
The VOTable 1.1 spec says the following about ``name`` vs. ``ID``
on ``FIELD`` and ``VALUE`` elements:
``ID`` and ``name`` attributes have a different role in
VOTable: the ``ID`` is meant as a *unique identifier* of an
element seen as a VOTable component, while the ``name`` is
meant for presentation purposes, and need not to be unique
throughout the VOTable document. The ``ID`` attribute is
therefore required in the elements which have to be
referenced, but in principle any element may have an ``ID``
attribute. ... In summary, the ``ID`` is different from the
``name`` attribute in that (a) the ``ID`` attribute is made
from a restricted character set, and must be unique throughout
a VOTable document whereas names are standard XML attributes
and need not be unique; and (b) there should be support in the
parsing software to look up references and extract the
relevant element with matching ``ID``.
It is further recommended in the VOTable 1.2 spec:
While the ``ID`` attribute has to be unique in a VOTable
document, the ``name`` attribute need not. It is however
recommended, as a good practice, to assign unique names within
a ``TABLE`` element. This recommendation means that, between a
``TABLE`` and its corresponding closing ``TABLE`` tag,
``name`` attributes of ``FIELD``, ``PARAM`` and optional
``GROUP`` elements should be all different.
Since ``astropy.io.votable`` requires a unique identifier for each of its
columns, ``ID`` is used for the column name when present.
    However, when ``ID`` is not present (since it is not required by
    the specification), ``name`` is used instead. In that case, ``name``
must be cleansed by replacing invalid characters (such as
whitespace) with underscores.
.. note::
This warning does not indicate that the input file is invalid
with respect to the VOTable specification, only that the
column names in the record array may not match exactly the
``name`` attributes specified in the file.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Implicitly generating an ID from a name '{}' -> '{}'"
default_args = ("x", "y")
class W04(VOTableSpecWarning):
"""
The ``content-type`` attribute must use MIME content-type syntax as
defined in `RFC 2046 <https://tools.ietf.org/html/rfc2046>`__.
The current check for validity is somewhat over-permissive.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "content-type '{}' must be a valid MIME content type"
default_args = ("x",)
class W05(VOTableSpecWarning):
"""
The attribute must be a valid URI as defined in `RFC 2396
<https://www.ietf.org/rfc/rfc2396.txt>`_.
"""
message_template = "'{}' is not a valid URI"
default_args = ("x",)
class W06(VOTableSpecWarning):
"""
This warning is emitted when a ``ucd`` attribute does not match
the syntax of a `unified content descriptor
<http://vizier.u-strasbg.fr/doc/UCD.htx>`__.
If the VOTable version is 1.2 or later, the UCD will also be
checked to ensure it conforms to the controlled vocabulary defined
by UCD1+.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:ucd>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:ucd>`__
"""
message_template = "Invalid UCD '{}': {}"
default_args = ("x", "explanation")
class W07(VOTableSpecWarning):
"""
    An astroYear field is a Besselian or Julian year matching the
regular expression::
^[JB]?[0-9]+([.][0-9]*)?$
Defined in this XML Schema snippet::
<xs:simpleType name="astroYear">
<xs:restriction base="xs:token">
<xs:pattern value="[JB]?[0-9]+([.][0-9]*)?"/>
</xs:restriction>
</xs:simpleType>
"""
message_template = "Invalid astroYear in {}: '{}'"
default_args = ("x", "y")
class W08(VOTableSpecWarning):
"""
    To avoid locale-dependent number parsing differences, ``astropy.io.votable``
may require a string or unicode string where a numeric type may
make more sense.
"""
message_template = "'{}' must be a str or bytes object"
default_args = ("x",)
class W09(VOTableSpecWarning):
"""
The VOTable specification uses the attribute name ``ID`` (with
uppercase letters) to specify unique identifiers. Some
VOTable-producing tools use the more standard lowercase ``id``
instead. ``astropy.io.votable`` accepts ``id`` and emits this warning if
``verify`` is ``'warn'``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "ID attribute not capitalized"
class W10(VOTableSpecWarning):
"""
The parser has encountered an element that does not exist in the
specification, or appears in an invalid context. Check the file
against the VOTable schema (with a tool such as `xmllint
<http://xmlsoft.org/xmllint.html>`__. If the file validates
against the schema, and you still receive this warning, this may
indicate a bug in ``astropy.io.votable``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Unknown tag '{}'. Ignoring"
default_args = ("x",)
class W11(VOTableSpecWarning):
"""
Earlier versions of the VOTable specification used a ``gref``
attribute on the ``LINK`` element to specify a `GLU reference
<http://aladin.u-strasbg.fr/glu/>`__. New files should
specify a ``glu:`` protocol using the ``href`` attribute.
Since ``astropy.io.votable`` does not currently support GLU references, it
likewise does not automatically convert the ``gref`` attribute to
the new form.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "The gref attribute on LINK is deprecated in VOTable 1.1"
class W12(VOTableChangeWarning):
"""
In order to name the columns of the Numpy record array, each
``FIELD`` element must have either an ``ID`` or ``name`` attribute
to derive a name from. Strictly speaking, according to the
VOTable schema, the ``name`` attribute is required. However, if
    ``name`` is not present but ``ID`` is, and ``verify`` is not ``'exception'``,
``astropy.io.votable`` will continue without a ``name`` defined.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = (
"'{}' element must have at least one of 'ID' or 'name' attributes"
)
default_args = ("x",)
class W13(VOTableSpecWarning):
"""
Some VOTable files in the wild use non-standard datatype names. These
are mapped to standard ones using the following mapping::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' is not a valid VOTable datatype, should be '{}'"
default_args = ("x", "y")
# W14: Deprecated
class W15(VOTableSpecWarning):
"""
The ``name`` attribute is required on every ``FIELD`` element.
However, many VOTable files in the wild omit it and provide only
    an ``ID`` instead. In this case, when ``verify`` is not ``'exception'``,
    ``astropy.io.votable`` will copy the ``ID`` attribute to a new ``name``
    attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "{} element missing required 'name' attribute"
default_args = ("x",)
# W16: Deprecated
class W17(VOTableSpecWarning):
"""
A ``DESCRIPTION`` element can only appear once within its parent
element.
According to the schema, it may only occur once (`1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__)
However, it is a `proposed extension
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:addesc>`__
to VOTable 1.2.
"""
message_template = "{} element contains more than one DESCRIPTION element"
default_args = ("x",)
class W18(VOTableSpecWarning):
"""
The number of rows explicitly specified in the ``nrows`` attribute
does not match the actual number of rows (``TR`` elements) present
in the ``TABLE``. This may indicate truncation of the file, or an
internal error in the tool that produced it. If ``verify`` is not
``'exception'``, parsing will proceed, with the loss of some performance.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC10>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC10>`__
"""
message_template = "TABLE specified nrows={}, but table contains {} rows"
default_args = ("x", "y")
class W19(VOTableSpecWarning):
"""
The column fields as defined using ``FIELD`` elements do not match
those in the headers of the embedded FITS file. If ``verify`` is not
``'exception'``, the embedded FITS file will take precedence.
"""
message_template = (
"The fields defined in the VOTable do not match those in the "
+ "embedded FITS file"
)
class W20(VOTableSpecWarning):
"""
If no version number is explicitly given in the VOTable file, the
parser assumes it is written to the VOTable 1.1 specification.
"""
message_template = "No version number specified in file. Assuming {}"
default_args = ("1.1",)
class W21(UnimplementedWarning):
"""
Unknown issues may arise using ``astropy.io.votable`` with VOTable files
from a version other than 1.1, 1.2, 1.3, or 1.4.
"""
message_template = (
"astropy.io.votable is designed for VOTable version 1.1, 1.2, 1.3,"
" and 1.4, but this file is {}"
)
default_args = ("x",)
class W22(VOTableSpecWarning):
"""
Version 1.0 of the VOTable specification used the ``DEFINITIONS``
element to define coordinate systems. Version 1.1 now uses
``COOSYS`` elements throughout the document.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:definitions>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:definitions>`__
"""
message_template = "The DEFINITIONS element is deprecated in VOTable 1.1. Ignoring"
class W23(IOWarning):
"""
    Raised when the VO service database cannot be updated (possibly
    due to a network outage). This is only a warning, since an older
    and possibly out-of-date VO service database was available
locally.
"""
message_template = "Unable to update service information for '{}'"
default_args = ("x",)
class W24(VOWarning, FutureWarning):
"""
The VO catalog database retrieved from the www is designed for a
    newer version of ``astropy.io.votable``. This may cause problems or limit the
    features available when performing service queries. Consider upgrading ``astropy.io.votable``
to the latest version.
"""
message_template = (
"The VO catalog database is for a later version of astropy.io.votable"
)
class W25(IOWarning):
"""
A VO service query failed due to a network error or malformed
    arguments. An alternative service may be attempted. If all
services fail, an exception will be raised.
"""
message_template = "'{}' failed with: {}"
default_args = ("service", "...")
class W26(VOTableSpecWarning):
"""
    The given element was not supported inside its parent element until
    the specified VOTable version; however, the version declared in the
    file is for an earlier version. Such elements may not be written out
    to the file.
"""
message_template = "'{}' inside '{}' added in VOTable {}"
default_args = ("child", "parent", "X.X")
class W27(VOTableSpecWarning):
"""
The ``COOSYS`` element was deprecated in VOTABLE version 1.2 in
favor of a reference to the Space-Time Coordinate (STC) data
model (see `utype
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:utype>`__
and the IVOA note `referencing STC in VOTable
    <http://ivoa.net/Documents/latest/VOTableSTC.html>`__).
"""
message_template = "COOSYS deprecated in VOTable 1.2"
class W28(VOTableSpecWarning):
"""
The given attribute was not supported on the given element until the
    specified VOTable version; however, the version declared in the file is
for an earlier version. These attributes may not be written out to
the file.
"""
message_template = "'{}' on '{}' added in VOTable {}"
default_args = ("attribute", "element", "X.X")
class W29(VOTableSpecWarning):
"""
Some VOTable files specify their version number in the form "v1.0",
    whereas the spec only allows the form "1.0".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Version specified in non-standard form '{}'"
default_args = ("v1.0",)
class W30(VOTableSpecWarning):
"""
Some VOTable files write missing floating-point values in non-standard ways,
such as "null" and "-". If ``verify`` is not ``'exception'``, any
non-standard floating-point literals are treated as missing values.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid literal for float '{}'. Treating as empty."
default_args = ("x",)
class W31(VOTableSpecWarning):
"""
    Since NaNs cannot be represented in integer fields directly, a null
    value must be specified in the FIELD descriptor to support reading
    NaNs from the TABLEDATA.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "NaN given in an integral field without a specified null value"
class W32(VOTableSpecWarning):
"""
Each field in a table must have a unique ID. If two or more fields
have the same ID, some will be renamed to ensure that all IDs are
unique.
From the VOTable 1.2 spec:
The ``ID`` and ``ref`` attributes are defined as XML types
``ID`` and ``IDREF`` respectively. This means that the
contents of ``ID`` is an identifier which must be unique
throughout a VOTable document, and that the contents of the
``ref`` attribute represents a reference to an identifier
which must exist in the VOTable document.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Duplicate ID '{}' renamed to '{}' to ensure uniqueness"
default_args = ("x", "x_2")
class W33(VOTableChangeWarning):
"""
Each field in a table must have a unique name. If two or more
fields have the same name, some will be renamed to ensure that all
names are unique.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Column name '{}' renamed to '{}' to ensure uniqueness"
default_args = ("x", "x_2")
class W34(VOTableSpecWarning):
"""
The attribute requires the value to be a valid XML token, as
defined by `XML 1.0
<http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Nmtoken>`__.
"""
message_template = "'{}' is an invalid token for attribute '{}'"
default_args = ("x", "y")
class W35(VOTableSpecWarning):
"""
The ``name`` and ``value`` attributes are required on all ``INFO``
elements.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC32>`__
"""
message_template = "'{}' attribute required for INFO elements"
default_args = ("x",)
class W36(VOTableSpecWarning):
"""
If the field specifies a ``null`` value, that value must conform
to the given ``datatype``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "null value '{}' does not match field datatype, setting to 0"
default_args = ("x",)
class W37(UnimplementedWarning):
"""
    The three data formats defined in the VOTable specification and supported by
``astropy.io.votable`` are ``TABLEDATA``, ``BINARY`` and ``FITS``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:data>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:data>`__
"""
message_template = "Unsupported data format '{}'"
default_args = ("x",)
class W38(VOTableSpecWarning):
"""
The only encoding for local binary data supported by the VOTable
specification is base64.
"""
message_template = "Inline binary data must be base64 encoded, got '{}'"
default_args = ("x",)
class W39(VOTableSpecWarning):
"""
Bit values do not support masking. This warning is raised upon
setting masked data in a bit column.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Bit values can not be masked"
class W40(VOTableSpecWarning):
"""
This is a terrible hack to support Simple Image Access Protocol
results from `NOIRLab Astro Data Archive <https://astroarchive.noirlab.edu/>`__. It
creates a field for the coordinate projection type of type "double",
which actually contains character data. We have to hack the field
to store character data, or we can't read it in. A warning will be
raised when this happens.
"""
message_template = "'cprojection' datatype repaired"
class W41(VOTableSpecWarning):
"""
An XML namespace was specified on the ``VOTABLE`` element, but the
namespace does not match what is expected for a ``VOTABLE`` file.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
Some files in the wild set the namespace to the location of the
VOTable schema, which is not correct and will not pass some
validating parsers.
"""
message_template = (
"An XML namespace is specified, but is incorrect. Expected '{}', got '{}'"
)
default_args = ("x", "y")
class W42(VOTableSpecWarning):
"""
The root element should specify a namespace.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
"""
message_template = "No XML namespace specified"
class W43(VOTableSpecWarning):
"""
    Referenced elements should be defined before the elements that refer
    to them. From the
VOTable 1.2 spec:
In VOTable1.2, it is further recommended to place the ID
attribute prior to referencing it whenever possible.
"""
message_template = "{} ref='{}' which has not already been defined"
default_args = ("element", "x")
class W44(VOTableSpecWarning):
"""
``VALUES`` elements that reference another element should not have
their own content.
From the VOTable 1.2 spec:
The ``ref`` attribute of a ``VALUES`` element can be used to
avoid a repetition of the domain definition, by referring to a
previously defined ``VALUES`` element having the referenced
``ID`` attribute. When specified, the ``ref`` attribute
defines completely the domain without any other element or
attribute, as e.g. ``<VALUES ref="RAdomain"/>``
"""
message_template = "VALUES element with ref attribute has content ('{}')"
default_args = ("element",)
class W45(VOWarning, ValueError):
"""
The ``content-role`` attribute on the ``LINK`` element must be one of
the following::
query, hints, doc, location
And in VOTable 1.3, additionally::
type
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
`1.3
<http://www.ivoa.net/documents/VOTable/20130315/PR-VOTable-1.3-20130315.html#sec:link>`__
"""
message_template = "content-role attribute '{}' invalid"
default_args = ("x",)
class W46(VOTableSpecWarning):
"""
The given char or unicode string is too long for the specified
field length.
"""
message_template = "{} value is too long for specified length of {}"
default_args = ("char or unicode", "x")
class W47(VOTableSpecWarning):
"""
If no arraysize is specified on a char field, the default of '1'
is implied, but this is rarely what is intended.
"""
message_template = "Missing arraysize indicates length 1"
class W48(VOTableSpecWarning):
"""
The attribute is not defined in the specification.
"""
message_template = "Unknown attribute '{}' on {}"
default_args = ("attribute", "element")
class W49(VOTableSpecWarning):
"""
Prior to VOTable 1.3, the empty cell was illegal for integer
fields.
If a \"null\" value was specified for the cell, it will be used
for the value, otherwise, 0 will be used.
"""
message_template = "Empty cell illegal for integer fields."
class W50(VOTableSpecWarning):
"""
Invalid unit string as defined in the `Units in the VO, Version 1.0
<https://www.ivoa.net/documents/VOUnits>`_ (VOTable version >= 1.4)
or `Standards for Astronomical Catalogues, Version 2.0
<http://cdsarc.u-strasbg.fr/doc/catstd-3.2.htx>`_ (version < 1.4).
Consider passing an explicit ``unit_format`` parameter if the units
in this file conform to another specification.
"""
message_template = "Invalid unit string '{}'"
default_args = ("x",)
class W51(VOTableSpecWarning):
"""
The integer value is out of range for the size of the field.
"""
message_template = "Value '{}' is out of range for a {} integer field"
default_args = ("x", "n-bit")
class W52(VOTableSpecWarning):
"""
The BINARY2 format was introduced in VOTable 1.3. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The BINARY2 format was introduced in VOTable 1.3, but "
"this file is declared as version '{}'"
)
default_args = ("1.2",)
class W53(VOTableSpecWarning):
"""
The VOTABLE element must contain at least one RESOURCE element.
"""
message_template = "VOTABLE element must contain at least one RESOURCE element."
default_args = ()
class W54(VOTableSpecWarning):
"""
The TIMESYS element was introduced in VOTable 1.4. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The TIMESYS element was introduced in VOTable 1.4, but "
"this file is declared as version '{}'"
)
default_args = ("1.3",)
class W55(VOTableSpecWarning):
"""
    This warning is issued when non-ASCII characters are detected while
    reading a TABLEDATA value for a FIELD with ``datatype="char"``.
"""
message_template = (
'FIELD ({}) has datatype="char" but contains non-ASCII value ({})'
)
default_args = ("", "")
class E01(VOWarning, ValueError):
"""
The size specifier for a ``char`` or ``unicode`` field must be
only a number followed, optionally, by an asterisk.
Multi-dimensional size specifiers are not supported for these
datatypes.
Strings, which are defined as a set of characters, can be
represented in VOTable as a fixed- or variable-length array of
characters::
<FIELD name="unboundedString" datatype="char" arraysize="*"/>
A 1D array of strings can be represented as a 2D array of
    characters. Given the logic above, it is possible to define a
variable-length array of fixed-length strings, but not a
fixed-length array of variable-length strings.
"""
message_template = "Invalid size specifier '{}' for a {} field (in field '{}')"
default_args = ("x", "char/unicode", "y")
class E02(VOWarning, ValueError):
"""
The number of array elements in the data does not match that specified
in the FIELD specifier.
"""
message_template = (
"Incorrect number of elements in array. Expected multiple of {}, got {}"
)
default_args = ("x", "y")
class E03(VOWarning, ValueError):
"""
Complex numbers should be two values separated by whitespace.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' does not parse as a complex number"
default_args = ("x",)
class E04(VOWarning, ValueError):
"""
A ``bit`` array should be a string of '0's and '1's.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid bit value '{}'"
default_args = ("x",)
class E05(VOWarning, ValueError):
r"""
A ``boolean`` value should be one of the following strings (case
insensitive) in the ``TABLEDATA`` format::
'TRUE', 'FALSE', '1', '0', 'T', 'F', '\0', ' ', '?'
and in ``BINARY`` format::
'T', 'F', '1', '0', '\0', ' ', '?'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid boolean value '{}'"
default_args = ("x",)
class E06(VOWarning, ValueError):
"""
The supported datatypes are::
double, float, bit, boolean, unsignedByte, short, int, long,
floatComplex, doubleComplex, char, unicodeChar
The following non-standard aliases are also supported, but in
    these cases :ref:`W13 <W13>` will be raised::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Unknown datatype '{}' on field '{}'"
default_args = ("x", "y")
# E07: Deprecated
class E08(VOWarning, ValueError):
"""
The ``type`` attribute on the ``VALUES`` element must be either
``legal`` or ``actual``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "type must be 'legal' or 'actual', but is '{}'"
default_args = ("x",)
class E09(VOWarning, ValueError):
"""
The ``MIN``, ``MAX`` and ``OPTION`` elements must always have a
``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "'{}' must have a value attribute"
default_args = ("x",)
class E10(VOWarning, ValueError):
"""
From VOTable 1.1 and later, ``FIELD`` and ``PARAM`` elements must have
a ``datatype`` field.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "'datatype' attribute required on all '{}' elements"
default_args = ("FIELD",)
class E11(VOWarning, ValueError):
"""
The precision attribute is meant to express the number of significant
digits, either as a number of decimal places (e.g. ``precision="F2"`` or
equivalently ``precision="2"`` to express 2 significant figures
after the decimal point), or as a number of significant figures
(e.g. ``precision="E5"`` indicates a relative precision of 10-5).
It is validated using the following regular expression::
[EF]?[1-9][0-9]*
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "precision '{}' is invalid"
default_args = ("x",)
class E12(VOWarning, ValueError):
"""
The width attribute is meant to indicate to the application the
number of characters to be used for input or output of the
quantity.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "width must be a positive integer, got '{}'"
default_args = ("x",)
class E13(VOWarning, ValueError):
r"""
From the VOTable 1.2 spec:
A table cell can contain an array of a given primitive type,
with a fixed or variable number of elements; the array may
even be multidimensional. For instance, the position of a
point in a 3D space can be defined by the following::
<FIELD ID="point_3D" datatype="double" arraysize="3"/>
and each cell corresponding to that definition must contain
exactly 3 numbers. An asterisk (\*) may be appended to
indicate a variable number of elements in the array, as in::
<FIELD ID="values" datatype="int" arraysize="100*"/>
where it is specified that each cell corresponding to that
definition contains 0 to 100 integer numbers. The number may
be omitted to specify an unbounded array (in practice up to
        ~2×10⁹ elements).
A table cell can also contain a multidimensional array of a
given primitive type. This is specified by a sequence of
dimensions separated by the ``x`` character, with the first
dimension changing fastest; as in the case of a simple array,
the last dimension may be variable in length. As an example,
the following definition declares a table cell which may
contain a set of up to 10 images, each of 64×64 bytes::
<FIELD ID="thumbs" datatype="unsignedByte" arraysize="64×64×10*"/>
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:dim>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:dim>`__
"""
message_template = "Invalid arraysize attribute '{}'"
default_args = ("x",)
class E14(VOWarning, ValueError):
"""
All ``PARAM`` elements must have a ``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "value attribute is required for all PARAM elements"
class E15(VOWarning, ValueError):
"""
All ``COOSYS`` elements must have an ``ID`` attribute.
Note that the VOTable 1.1 specification says this attribute is
optional, but its corresponding schema indicates it is required.
In VOTable 1.2, the ``COOSYS`` element is deprecated.
"""
message_template = "ID attribute is required for all COOSYS elements"
class E16(VOTableSpecWarning):
"""
The ``system`` attribute on the ``COOSYS`` element must be one of the
following::
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', 'geo_app'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:COOSYS>`__
"""
message_template = "Invalid system attribute '{}'"
default_args = ("x",)
class E17(VOWarning, ValueError):
"""
``extnum`` attribute must be a positive integer.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "extnum must be a positive integer"
class E18(VOWarning, ValueError):
"""
The ``type`` attribute of the ``RESOURCE`` element must be one of
"results" or "meta".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "type must be 'results' or 'meta', not '{}'"
default_args = ("x",)
class E19(VOWarning, ValueError):
"""
Raised either when the file doesn't appear to be XML, or the root
element is not VOTABLE.
"""
message_template = "File does not appear to be a VOTABLE"
class E20(VOTableSpecError):
"""
The table had only *x* fields defined, but the data itself has more
columns than that.
"""
message_template = "Data has more columns than are defined in the header ({})"
default_args = ("x",)
class E21(VOWarning, ValueError):
"""
The table had *x* fields defined, but the data itself has only *y*
columns.
"""
message_template = "Data has fewer columns ({}) than are defined in the header ({})"
default_args = ("x", "y")
class E22(VOWarning, ValueError):
"""
All ``TIMESYS`` elements must have an ``ID`` attribute.
"""
message_template = "ID attribute is required for all TIMESYS elements"
class E23(VOTableSpecWarning):
"""
The ``timeorigin`` attribute on the ``TIMESYS`` element must be
either a floating point literal specifying a valid Julian Date,
or, for convenience, the string "MJD-origin" (standing for 2400000.5)
or the string "JD-origin" (standing for 0).
**References**: `1.4
<http://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC21>`__
"""
message_template = "Invalid timeorigin attribute '{}'"
default_args = ("x",)
class E24(VOWarning, ValueError):
"""
Non-ASCII unicode values should not be written when the FIELD ``datatype="char"``,
and cannot be written in BINARY or BINARY2 serialization.
"""
message_template = (
'Attempt to write non-ASCII value ({}) to FIELD ({}) which has datatype="char"'
)
default_args = ("", "")
class E25(VOTableSpecWarning):
"""
A VOTable cannot have a DATA section without any defined FIELD; DATA will be ignored.
"""
message_template = "No FIELDs are defined; DATA section will be ignored."
def _get_warning_and_exception_classes(prefix):
classes = []
for key, val in globals().items():
if re.match(prefix + "[0-9]{2}", key):
classes.append((key, val))
classes.sort()
return classes
def _build_doc_string():
def generate_set(prefix):
classes = _get_warning_and_exception_classes(prefix)
out = io.StringIO()
for name, cls in classes:
out.write(f".. _{name}:\n\n")
msg = f"{cls.__name__}: {cls.get_short_name()}"
if not isinstance(msg, str):
msg = msg.decode("utf-8")
out.write(msg)
out.write("\n")
out.write("~" * len(msg))
out.write("\n\n")
doc = cls.__doc__
if not isinstance(doc, str):
doc = doc.decode("utf-8")
out.write(dedent(doc))
out.write("\n\n")
return out.getvalue()
warnings = generate_set("W")
exceptions = generate_set("E")
return {"warnings": warnings, "exceptions": exceptions}
if __doc__ is not None:
__doc__ = __doc__.format(**_build_doc_string())
__all__.extend([x[0] for x in _get_warning_and_exception_classes("W")])
__all__.extend([x[0] for x in _get_warning_and_exception_classes("E")])
|
f6e6162db6db1d02886d0459c49d6864c7759b211af0a106bef2efc81177a595 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import sys
import warnings
import numpy as np
import pytest
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.io.fits.column import NUMPY2FITS, ColumnAttribute, Delayed
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from astropy.table import Table
from astropy.units import Unit, UnitsWarning, UnrecognizedUnit
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from .conftest import FitsTestCase
def comparefloats(a, b):
"""
    Compare two float scalars or arrays and see if they are consistent.
    Consistency is determined by ensuring the difference is less than the
expected amount. Return True if consistent, False if any differences.
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == "float32" or bb.dtype.name == "float32":
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.0
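    # Entries where the reference value is zero must match exactly; elsewhere the
    # relative difference is compared against the precision threshold above.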
if np.any(mask0):
if diff[mask0].max() != 0.0:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
def comparerecords(a, b):
"""
    Compare two record arrays.
    Does this field by field, using approximate comparison for float columns
    (complex values are not yet handled).
    Column names are not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == "S":
fielda = decode_ascii(fielda)
if fieldb.dtype.char == "S":
fieldb = decode_ascii(fieldb)
if not isinstance(fielda, type(fieldb)) and not isinstance(
fieldb, type(fielda)
):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f"field {i} type differs")
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
elif isinstance(fielda, fits.column._VLF) or isinstance(
fieldb, fits.column._VLF
):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print(f"fielda[{row}]: {fielda[row]}")
print(f"fieldb[{row}]: {fieldb[row]}")
print(f"field {i} differs in row {row}")
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
return True
def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [
k for k, v in fits.Column.__dict__.items() if isinstance(v, ColumnAttribute)
]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr)
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
        Ensure that a header from one HDU is copied when used to initialize a new
HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr["FILENAME"] = "labq01i3q_rawtag.fits"
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header["FILENAME"] = "labq01i3q_flt.fits"
# Original header should be unchanged
assert thdr["FILENAME"] == "labq01i3q_rawtag.fits"
def test_open(self, home_is_data):
# open some existing FITS files:
tt = fits.open(self.data("tb.fits"))
fd = fits.open(self.data("test0.fits"))
# create some local arrays
a1 = chararray.array(["abc", "def", "xx"])
r1 = np.array([11.0, 12.0, 13.0], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name="abc", format="3A", array=a1)
c2 = fits.Column(name="def", format="E", array=r1)
a3 = np.array([3, 4, 5], dtype="i2")
c3 = fits.Column(name="xyz", format="I", array=a3)
a4 = np.array([1, 2, 3], dtype="i2")
c4 = fits.Column(name="t1", format="I", array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype="c8")
c5 = fits.Column(name="t2", format="C", array=a5)
        # Note that the X format requires a two-dimensional array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name="t3", format="X", array=a6)
a7 = np.array([101, 102, 103], dtype="i4")
c7 = fits.Column(name="t4", format="J", array=a7)
a8 = np.array(
[
[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
],
dtype=np.uint8,
)
c8 = fits.Column(name="t5", format="11X", array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view("bool")).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field("abc")) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp("tableout1.fits"), overwrite=True)
with fits.open(self.temp("tableout1.fits")) as f2:
temp = f2[1].data.field(7)
assert (
temp[0]
== [
True,
True,
False,
True,
False,
True,
True,
True,
False,
False,
True,
]
).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp("tableout2.fits"), "append")
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data("tb.fits"))
assert t[1].header["tform1"] == "1J"
info = {
"name": ["c1", "c2", "c3", "c4"],
"format": ["1J", "3A", "1E", "1L"],
"unit": ["", "", "", ""],
"null": [-2147483647, "", "", ""],
"bscale": ["", "", 3, ""],
"bzero": ["", "", 0.4, ""],
"disp": ["I11", "A3", "G15.7", "L6"],
"start": ["", "", "", ""],
"dim": ["", "", "", ""],
"coord_inc": ["", "", "", ""],
"coord_type": ["", "", "", ""],
"coord_unit": ["", "", "", ""],
"coord_ref_point": ["", "", "", ""],
"coord_ref_value": ["", "", "", ""],
"time_ref_pos": ["", "", "", ""],
}
assert t[1].columns.info(output=False) == info
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field("c4")[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, "c4")) == "[84 84]"
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data("ascii.fits"))
ra1 = np.rec.array(
[
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345),
],
names="c1, c2",
)
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names="c1, c2")
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array(
[(10.123000144958496, 37), (15.609999656677246, 17), (345.0, 345)],
names="c1, c2",
)
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
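        # 'start' is the 1-based byte offset of a field within the ASCII table
        # row; the dtype offsets checked below are the equivalent 0-based values.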
a1 = chararray.array(["abcd", "def"])
r1 = np.array([11.0, 12.0])
c1 = fits.Column(name="abc", format="A3", start=19, array=a1)
c2 = fits.Column(name="def", format="E", start=3, array=r1)
c3 = fits.Column(name="t1", format="I", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert dict(hdu.data.dtype.fields) == {
"abc": (np.dtype("|S3"), 18),
"def": (np.dtype("|S15"), 2),
"t1": (np.dtype("|S10"), 21),
}
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11.0, 12.0])
c2 = fits.Column(name="def", format="D", array=r1, bscale=2.3, bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with open(self.temp("toto.fits")) as f:
assert "4.95652173913043548D+00" in f.read()
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
# Test Integer precision according to width
c1 = fits.Column(name="t2", format="I2", array=[91, 92, 93])
c2 = fits.Column(name="t4", format="I5", array=[91, 92, 93])
c3 = fits.Column(name="t8", format="I10", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c1, c2, c3])
assert c1.array.dtype == np.int16
assert c2.array.dtype == np.int32
assert c3.array.dtype == np.int64
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype="uint8")
x[0] = channelsIn
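        # 'PB()' declares a variable-length array column of unsigned bytes; each
        # row holds a descriptor pointing at its own array on the heap.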
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
hduL = fits.open(self.temp("testendian.fits"))
rfiHDU = hduL["RFI"]
data = rfiHDU.data
channelsOut = data.field("Channels")[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1.0, 2.0, 3.0, 4.0]
a1 = np.array(a, dtype="<f8")
a2 = np.array(a, dtype=">f8")
col1 = fits.Column(name="a", format="D", array=a1)
col2 = fits.Column(name="b", format="D", array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data["a"] == a1).all()
assert (tbhdu.data["b"] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
with fits.open(self.temp("testendian.fits")) as hdul:
assert (hdul[1].data["a"] == a2).all()
assert (hdul[1].data["b"] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float32,a10",
names="order,name,mag,Sp",
)
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "S20", "float32", "S10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "U20", "float32", "U10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
hdu = fits.TableHDU.from_columns(bright, nrows=2)
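        # nrows=2 builds the new table from only the first two rows of `bright`.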
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
        # Ensure I can change the value of one data element and it affects
        # all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == "Serius"
assert hdu.data[1][1] == "Canopys"
assert (hdu.data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == "A1V"
assert hdu.data[1][3] == "F0Ib"
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert (hdul[1].data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == "Serius"
assert hdul[1].data[1][1] == "Canopys"
assert (
hdul[1].data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)
).all()
assert hdul[1].data[0][3] == "A1V"
assert hdul[1].data[1][3] == "F0Ib"
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array(
[(1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib")],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
assert comparerecords(hdu.data, tmp)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data("tb.fits")) as h:
data = h[1].data
new_data = np.array([(3, "qwe", 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith("FITS_rec(")
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert t1[1].columns._arrays[1] is t1[1].columns.columns[1].array
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp("newtable.fits"))
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 19, "8R x 5C", "[10A, J, 10A, 5E, L]", ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
("NGC5", 412, "", z, False),
("NGC6", 434, "", z, True),
("NGC7", 408, "", z, False),
("NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
        # Verify that all of the references to the data point to the same
        # ndarray.
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu1.data, array)
def test_adding_a_column_inplace(self):
        # Tests adding a column to a table in place.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
tbhdu.columns.add_col(c5)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu.data, array)
def test_adding_a_column_to_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
col = fits.Column(name="a", array=np.array([1, 2]), format="K")
tbhdu.columns.add_col(col)
assert tbhdu.columns.names == ["target", "V_mag", "a"]
array = np.rec.array(
[("NGC1001", 11.1, 1), ("NGC1002", 12.3, 2), ("NGC1003", 15.2, 0)],
formats="a20,f4,i8",
)
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_removing_a_column_inplace(self):
        # Tests removing a column from a table in place.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
tbhdu.columns.del_col("flag")
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z),
("NGC2", 334, "", z),
("NGC3", 308, "", z),
("NCG4", 317, "", z),
],
formats="a10,u4,a10,5f4",
)
assert comparerecords(tbhdu.data, array)
tbhdu.columns.del_col("counts")
tbhdu.columns.del_col("notes")
assert tbhdu.columns.names == ["target", "spectrum"]
array = np.rec.array(
[("NGC1", z), ("NGC2", z), ("NGC3", z), ("NCG4", z)], formats="a10,5f4"
)
assert comparerecords(tbhdu.data, array)
def test_removing_a_column_from_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
tbhdu.columns.del_col("V_mag")
assert tbhdu.columns.names == ["target"]
array = np.rec.array([("NGC1001",), ("NGC1002",), ("NGC1003",)], formats="a20")
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target1", format="10A", array=names)
c2 = fits.Column(name="counts1", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes1", format="A10")
c4 = fits.Column(name="spectrum1", format="5E")
c5 = fits.Column(name="flag1", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp("newtable.fits"))
        # Verify that all of the references to the data point to the same
        # ndarray.
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(
1,
"",
1,
"BinTableHDU",
30,
"4R x 10C",
"[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]",
"",
),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
assert hdu.columns.names == [
"target",
"counts",
"notes",
"spectrum",
"flag",
"target1",
"counts1",
"notes1",
"spectrum1",
"flag1",
]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {"a": 2, "b": "b", "c": 2.3}
data = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "S1"), ("c", float)],
)
b = fits.BinTableHDU(data=data)
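        # Setting .null on each Column should propagate to the corresponding
        # TNULLn header keyword when the HDU is written out.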
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
header = hdul[1].header
assert header["TNULL1"] == 2
assert header["TNULL2"] == "b"
assert header["TNULL3"] == 2.3
def test_multidimension_table_from_numpy_rec_columns(self):
"""Regression test for https://github.com/astropy/astropy/issues/5280
and https://github.com/astropy/astropy/issues/5287
        Multidimensional tables can now be written with the correct TDIM.
Author: Stephen Bailey.
"""
dtype = [
("x", (str, 5)), # 1D column of 5-character strings
("y", (str, 3), (4,)), # 2D column; each row is four 3-char strings
]
data = np.zeros(2, dtype=dtype)
data["x"] = ["abcde", "xyz"]
data["y"][0] = ["A", "BC", "DEF", "123"]
data["y"][1] = ["X", "YZ", "PQR", "999"]
table = Table(data)
# Test convenience functions io.fits.writeto / getdata
fits.writeto(self.temp("test.fits"), data)
dx = fits.getdata(self.temp("test.fits"))
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), "x: {} != {}".format(data["x"], dx["x"])
assert np.all(data["y"] == dx["y"]), "y: {} != {}".format(data["y"], dx["y"])
# Test fits.BinTableHDU(data) and avoid convenience functions
hdu0 = fits.PrimaryHDU()
hdu1 = fits.BinTableHDU(data)
hx = fits.HDUList([hdu0, hdu1])
hx.writeto(self.temp("test2.fits"))
fx = fits.open(self.temp("test2.fits"))
dx = fx[1].data
fx.close()
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), "x: {} != {}".format(data["x"], dx["x"])
assert np.all(data["y"] == dx["y"]), "y: {} != {}".format(data["y"], dx["y"])
# Test Table write and read
table.write(self.temp("test3.fits"))
tx = Table.read(self.temp("test3.fits"), character_as_bytes=False)
assert table["x"].dtype == tx["x"].dtype
assert table["y"].dtype == tx["y"].dtype
assert np.all(table["x"] == tx["x"]), "x: {} != {}".format(table["x"], tx["x"])
assert np.all(table["y"] == tx["y"]), "y: {} != {}".format(table["y"], tx["y"])
def test_mask_array(self):
t = fits.open(self.data("table.fits"))
tbdata = t[1].data
mask = tbdata.field("V_mag") > 12
newtbdata = tbdata[mask]
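        # Boolean-mask indexing returns a new FITS_rec holding only the matching
        # rows, which can then be wrapped in a fresh BinTableHDU.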
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp("newtable.fits"))
hdul = fits.open(self.temp("newtable.fits"))
# match to a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
row = t1[1].data[2]
assert row["counts"] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ""
assert (c == np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)).all()
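        # A row and any slice of it are views onto the same underlying record,
        # so assignments through either one are visible everywhere.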
row["counts"] = 310
assert row["counts"] == 310
row[1] = 315
assert row["counts"] == 315
assert row[1:4]["counts"] == 315
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
assert row["counts"] == 300
row[1:4][0] = 400
assert row[1:4]["counts"] == 400
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]["counts"] == 500
row[1:4:2][0] = 300
assert row[1:4]["counts"] == 300
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
assert row[1:4].field(0) == 300
assert row[1:4].field("counts") == 300
pytest.raises(KeyError, row[1:4].field, "flag")
row[1:4].setfield("counts", 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, "flag", False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name="target", format="10A")
c2 = fits.Column(name="counts", format="J", unit="DN")
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L")
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
        # Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
        # Test assigning data to a table's row using a tuple
tbhdu.data[2] = (
"NGC1",
312,
"A Note",
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True,
)
        # Test assigning data to a table's row using a list
tbhdu.data[3] = [
"JIM1",
"33",
"A Note",
np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32),
True,
]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == "NGC1"
assert tbhdu.columns.columns[2].array[0] == ""
assert (
tbhdu.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == "JIM1"
assert tbhdu.columns.columns[2].array[3] == "A Note"
assert (
tbhdu.columns.columns[3].array[3]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[3] == np.True_), (bool, np.bool_)
)
and v
)
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
# Assign the 4 rows from the second table to rows 5 thru 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.data._coldefs._arrays[0]
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns.columns[0].array
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns._arrays[0]
)
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == "NGC1"
assert tbhdu2.columns.columns[2].array[0] == ""
assert (
tbhdu2.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == "NGC5"
assert tbhdu2.columns.columns[2].array[4] == ""
assert (
tbhdu2.columns.columns[3].array[4]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[4] == np.False_), (bool, np.bool_)
)
and v
)
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ""
assert tbhdu2.columns.columns[2].array[8] == ""
assert (
tbhdu2.columns.columns[3].array[8]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[8] == np.False_), (bool, np.bool_)
)
and v
)
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert id(coldefs.columns[0].array) != id(tbhdu.columns.columns[0].array)
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.data._coldefs._arrays[0]
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns.columns[0].array
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns._arrays[0]
)
        # Ensure I can change the value of one data element and it affects
        # all of the others.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
        override any existing 'EXTNAME' value in a supplied header.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ""
assert "EXTNAME" not in hdu.header
hdu.name = "FOO"
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# Passing name to constructor
hdu = hducls(name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# And overriding a header with a different extname
hdr = fits.Header()
hdr["EXTNAME"] = "EVENTS"
hdu = hducls(header=hdr, name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert "EXTVER" not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header["EXTVER"] == 2
            # Passing ver to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header["EXTVER"] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr["EXTVER"] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header["EXTVER"] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name="spam", format="E", array=[42.0])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name="flag", format="2L", array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (
tbhdu1.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu1.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (
tbhdu.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
def test_fits_rec_column_access(self):
tbdata = fits.getdata(self.data("table.fits"))
assert (tbdata.V_mag == tbdata.field("V_mag")).all()
assert (tbdata.V_mag == tbdata["V_mag"]).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data("tb.fits"))
for col in ("c1", "c2", "c3", "c4"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data("ascii.fits"))
for col in ("a", "b"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(
name="x",
format="PI()",
array=np.array([[45, 56], [11, 12, 13]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data["x"]) == type(hdu.data.x) # noqa: E721
assert (hdu.data["x"][0] == hdu.data.x[0]).all()
assert (hdu.data["x"][1] == hdu.data.x[1]).all()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data("zerowidth.fits"))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert "ORBPARM" in tbhdu.columns.names
        # The zero-width ORBPARM column is still present in the data, and the
        # data should be readable
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.writeto(self.temp("newtable.fits"))
hdul.close()
hdul = fits.open(self.temp("newtable.fits"))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert "ORBPARM" in tbhdu.columns.names
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.close()
def test_string_column_padding(self):
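# Binary table string fields are padded out to the full TFORM width with NUL
# bytes, while ASCII table fields are padded with spaces; this exercises both
# representations and the conversion between them.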
a = ["img1", "img2", "img3a", "p"]
s = (
"img1\x00\x00\x00\x00\x00\x00"
"img2\x00\x00\x00\x00\x00\x00"
"img3a\x00\x00\x00\x00\x00"
"p\x00\x00\x00\x00\x00\x00\x00\x00\x00"
)
acol = fits.Column(name="MEMNAME", format="A10", array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode("raw-unicode-escape") == s
ahdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s.replace(
"\x00", " "
)
assert (hdul[1].data["MEMNAME"] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[
([0, 1, 2, 3, 4, 5], "row1" * 2),
([6, 7, 8, 9, 0, 1], "row2" * 2),
([2, 3, 4, 5, 6, 7], "row3" * 2),
],
formats="6i4,a8",
)
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits"), mode="update") as hdul:
# Modify the TDIM fields to my own specification
hdul[1].header["TDIM1"] = "(2,3)"
hdul[1].header["TDIM2"] = "(4,2)"
with fits.open(self.temp("newtable.fits")) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (
c1
== np.array(
[
[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]],
]
)
).all()
assert (
c2 == np.array([["row1", "row1"], ["row2", "row2"], ["row3", "row3"]])
).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", 4)])
data["x"] = 1, 2, 3
data["s"] = "ok"
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", (4, 3))])
data["x"] = 1, 2, 3
data["s"] = "ok"
del t
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4, 3)
def test_oned_array_single_element(self):
# a table with rows that are 1d arrays of a single value
data = np.array([(1,), (2,)], dtype=([("x", "i4", (1,))]))
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("onedtable.fits"))
with fits.open(self.temp("onedtable.fits")) as hdul:
thdu = hdul[1]
c = thdu.data.field(0)
assert c.shape == (2, 1)
assert thdu.header["TDIM1"] == "(1)"
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b"abcd", b"efgh"], [b"ijkl", b"mnop"], [b"qrst", b"uvwx"]]
arr = np.array(
[(data,), (data,), (data,), (data,), (data,)], dtype=[("S", "(3, 2)S4")]
)
tbhdu1 = fits.BinTableHDU(data=arr)
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(4,2,3)"
assert tbhdu2.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
assert np.all(tbhdu2.data["S"] == tbhdu.data["S"])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b"ab", b"cd"], [b"ef", b"gh"], [b"ij", b"kl"]]
arr2 = [1, 2, 3, 4, 5]
arr = np.array(
[(arr1, arr2), (arr1, arr2)], dtype=[("a", "(3, 2)S2"), ("b", "5i8")]
)
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp("test.fits"), "wb") as f:
f.write(raw_bytes.replace(b"(2,2,3)", b"(2,2,2)"))
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(2,2,2)"
assert tbhdu2.header["TFORM1"] == "12A"
for row in tbhdu2.data:
assert np.all(row["a"] == [["ab", "cd"], ["ef", "gh"]])
assert np.all(row["b"] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [["abc", "def", "ghi"], ["jkl", "mno", "pqr"], ["stu", "vwx", "yz "]]
recarr = np.rec.array([(data,), (data,)], formats=["(3,3)S3"])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
with fits.open(self.temp("test.fits")) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(["a", "b"], dtype="|S1")
arrb = np.array([["a", "bc"], ["cd", "e"]], dtype="|S2")
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name="str", format="1A", array=arra),
fits.Column(name="strarray", format="4A", dim="(2,2)", array=arrb),
fits.Column(name="intarray", format="4I", dim="(2, 2)", array=arrc),
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data["str"].encode("ascii") == arra).all()
assert (h[1].data["strarray"].encode("ascii") == arrb).all()
assert (h[1].data["intarray"] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [
fits.Column(name="a", format="20I", dim="(2,2)", array=arra),
fits.Column(name="b", format="4I", dim="(2,2)", array=arrb),
]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM1"] == "20I"
assert h[1].header["TFORM2"] == "4I"
assert h[1].header["TDIM1"] == h[1].header["TDIM2"] == "(2,2)"
assert (h[1].data["a"] == arra).all()
assert (h[1].data["b"] == arrb).all()
assert h[1].data.itemsize == 48  # 24 16-bit elements (20 + 4) = 48 bytes per row
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(
VerifyError, fits.Column, name="a", format="2I", dim="(2,2)", array=arra
)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data("tdim.fits")) as hdulist:
assert hdulist[1].data["V_mag"].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
targets = data.field("target")
s = data[:]
assert (s.field("target") == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field("target") == targets[:n]).all()
s = data[n:]
assert (s.field("target") == targets[n:]).all()
s = data[::2]
assert (s.field("target") == targets[::2]).all()
s = data[::-1]
assert (s.field("target") == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data("table.fits")) as hdu:
data = hdu[1].data
data["V_mag"] = 0
assert np.all(data["V_mag"] == 0)
data["V_mag"] = 1
assert np.all(data["V_mag"] == 1)
for container in (list, tuple, np.array):
data["V_mag"] = container([1, 2, 3])
assert np.array_equal(data["V_mag"], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data("table.fits"), mode="readonly") as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
@pytest.mark.parametrize("tablename", ["table.fits", "tb.fits"])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
with fits.open(self.data(tablename)) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
_assert_attr_col(new_tbhdu, hdul[1])
def test_dump_load_array_columns(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array(
[("a", [1, 2, 3, 4], 0.1), ("b", [5, 6, 7, 8], 0.2)], formats="a1,4i4,f8"
)
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for that format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name="c0", format="L", array=a0)
# Format X (bit array) is currently not supported by the dump format
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name="c2", format="B", array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name="c3", format="I", array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name="c4", format="J", array=a4)
a5 = np.array(["a", "abc", "ab"])
c5 = fits.Column(name="c5", format="A3", array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name="c6", format="D", array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j], dtype=np.complex128)
c7 = fits.Column(name="c7", format="M", array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name="c8", format="PJ()", array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp("data.txt")
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name="names", format="I", array=[1])
c2 = fits.Column(name="formats", format="I", array=[2])
c3 = fits.Column(name="other", format="I", array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ["names", "formats", "other"]
assert t.data.formats == ["I"] * 3
assert (t.data["names"] == [1]).all()
assert (t.data["formats"] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats="|b1,|b1")
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp("table.fits"))
data = fits.getdata(self.temp("table.fits"), ext=1)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[("a", "?")])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data["a"] == arr["a"]).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column("F1", "L", array=[True, False])
c2 = fits.Column("F2", "L", array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp("table.fits"))
with fits.open(self.temp("table.fits"), mode="update") as hdul:
hdul[1].data["F1"][1] = True
hdul[1].data["F2"][0] = True
with fits.open(self.temp("table.fits")) as hdul:
assert (hdul[1].data["F1"] == [True, True]).all()
assert (hdul[1].data["F2"] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column(
"F1",
"A3",
null="---",
array=np.array(["1.0", "2.0", "---", "3.0"]),
ascii=True,
)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp("test.fits"))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp("test.fits"), mode="update") as h:
h[1].header["TFORM1"] = "E3"
del h[1].header["TNULL1"]
with fits.open(self.temp("test.fits")) as h:
pytest.raises(ValueError, lambda: h[1].data["F1"])
try:
with fits.open(self.temp("test.fits")) as h:
h[1].data["F1"]
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data"
)
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = " "
c1 = fits.Column(
"F1",
format="I8",
null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True,
)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp("ascii_null.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null.fits"), mode="r+") as h:
nulled = h.read().replace("2 ", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null.fits"), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = "NaN"
c2 = fits.Column(
"F1",
format="F12.8",
null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True,
)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp("ascii_null2.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null2.fits"), mode="r+") as h:
nulled = h.read().replace("3.00000000", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null2.fits"), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data("tb.fits")) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["NAXIS"] == 2
assert h[1].header["NAXIS1"] == 12
assert h[1].header["NAXIS2"] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
def test_unnecessary_table_load(self):
"""Test unnecessary parsing and processing of FITS tables when writing
directly from one FITS file to a new file without first reading the
data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unnecessary processing of the data.
"""
with fits.open(self.data("table.fits")) as h:
h[1].writeto(self.temp("test.fits"))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert "data" not in h[1].__dict__
with fits.open(self.data("table.fits")) as h1:
with fits.open(self.temp("test.fits")) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data("table.fits"))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data("tb.fits")) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata["c1"] == tbdata2["c1"])
assert np.all(tbdata["c2"] == tbdata2["c2"])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(
tbdata["c3"].astype(np.float32) == tbdata2["c3"].astype(np.float32)
)
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata["c4"], "T", "F") == tbdata2["c4"])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match="Field 2 has a repeat count of 0"):
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[("a", "i8"), ("b", "S64"), ("c", ("i4", (3, 2)))])
hdu = fits.BinTableHDU(array)
assert hdu.header["NAXIS1"] == 96
assert hdu.header["NAXIS2"] == 0
assert hdu.header["TDIM3"] == "(2,3)"
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data("random_groups.fits"))["DATA"]
col = fits.Column(name="TEST", array=data, dim="(3,1,128,1,1)", format="1152E")
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert np.all(hdul[1].data["TEST"] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data("tb.fits"))
data2 = fits.getdata(self.data("tb.fits"))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1) :] = data2
mask = merged["c1"] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data("tb.fits")))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([("abc",)], dtype=[("a", "S3")])
fits.writeto(self.temp("test.fits"), data)
with fits.open(self.temp("test.fits"), mode="update") as hdul:
hdul[1].data["a"][0] = "XYZ"
assert hdul[1].data["a"][0] == "XYZ"
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].data["a"][0] == "XYZ"
# Test update but with a non-trivial TDIMn
data = np.array(
[([["abc", "def", "geh"], ["ijk", "lmn", "opq"]],)],
dtype=[("a", ("S3", (2, 3)))],
)
fits.writeto(self.temp("test2.fits"), data)
expected = [["abc", "def", "geh"], ["ijk", "XYZ", "opq"]]
with fits.open(self.temp("test2.fits"), mode="update") as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data["a"][0, 1, 1] = "XYZ"
assert np.all(hdul[1].data["a"][0] == expected)
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
assert np.all(hdul[1].data["a"][0] == expected)
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting("FITS_rec"):
readfile(self.data("memtest.fits"))
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
@pytest.mark.slow
def test_reference_leak2(self, tmp_path):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_connect import TestMultipleHDU
from .test_core import TestCore
t1 = TestCore()
t1.setup_method()
try:
with _refcounting("FITS_rec"):
t1.test_add_del_columns2()
finally:
t1.teardown_method()
del t1
t2 = self.__class__()
for test_name in [
"test_recarray_to_bintablehdu",
"test_numpy_ndarray_to_bintablehdu",
"test_new_table_from_recarray",
"test_new_fitsrec",
]:
t2.setup_method()
try:
with _refcounting("FITS_rec"):
getattr(t2, test_name)()
finally:
t2.teardown_method()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting("FITS_rec"):
t3.test_read(tmp_path)
finally:
t3.teardown_class()
del t3
def test_dump_overwrite(self):
with fits.open(self.data("table.fits")) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
msg = (
r"File .* already exists\. File .* already exists\. File "
r".* already exists\. If you mean to replace the "
r"file\(s\) then use the argument 'overwrite=True'\."
)
with pytest.raises(OSError, match=msg):
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name="A", format="1J", bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
# Test that the file wrote out correctly
with fits.open(self.temp("test.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == data)
# Test updating the unsigned int data
hdu.data["A"][0] = 99
hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(
name="c1",
array=np.array([1], dtype=">i2"),
format="1I",
bscale=1,
bzero=32768,
)
S = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data["c1"][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data["c1"] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data["c1"][0] = 10
assert X[1].data["c1"][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data["c1"][0] == 10
def test_ascii_inttypes(self):
"""
Test correct integer dtypes according to ASCII table field widths.
Regression for https://github.com/astropy/astropy/issues/9899
"""
i08 = np.array([2**3, 2**23, -(2**22), 10, 2**23], dtype="i4")
i10 = np.array([2**8, 2**31 - 1, -(2**29), 30, 2**31 - 1], dtype="i8")
i20 = np.array([2**16, 2**63 - 1, -(2**63), 40, 2**63 - 1], dtype="i8")
i02 = np.array([2**8, 2**13, -(2**9), 50, 2**13], dtype="i2")
t0 = Table([i08, i08 * 2, i10, i20, i02])
t1 = Table.read(self.data("ascii_i4-i20.fits"))
assert t1.dtype == t0.dtype
assert comparerecords(t1, t0)
def test_ascii_floattypes(self):
"""Test different float formats."""
col1 = fits.Column(
name="a", format="D", array=np.array([11.1, 12.2]), ascii=True
)
col2 = fits.Column(
name="b", format="D16", array=np.array([15.5, 16.6]), ascii=True
)
col3 = fits.Column(
name="c", format="D16.7", array=np.array([1.1, 2.2]), ascii=True
)
hdu = fits.TableHDU.from_columns([col1, col2, col3])
hdu.writeto(self.temp("foo.fits"))
with fits.open(self.temp("foo.fits"), memmap=False) as hdul:
assert comparerecords(hdul[1].data, hdu.data)
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
assert (
len(objgraph.by_type(type_)) <= refcount
), "More {0!r} objects still in memory than before."
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[[0] * 1571] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as toto:
q = toto[1].data.field("QUAL_SPE")
assert (q[0][4:8] == np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith("J(1571)")
for code in ("PJ()", "QJ()"):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name="TESTVLF", format=format_code, array=arr)
col2 = fits.Column(name="TESTSCA", format="J", array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data["TESTSCA"]) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data["TESTVLF"]) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data["TESTVLF"][0] == arr[0]).all()
assert (tb_hdu.data["TESTVLF"][9] == arr[9]).all()
assert (tb_hdu.data["TESTVLF"][10] == ([0] * 10)).all()
assert (tb_hdu.data["TESTVLF"][-1] == ([0] * 10)).all()
for code in ("PJ()", "QJ()"):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array(
[np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array(
[np.array(["a", "b", "c"]), np.array(["d", "e"]), np.array(["f"])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ["a", "ab", "abc"]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[np.arange(1572)] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
data = fits.getdata(self.temp("toto.fits"))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data["QUAL_SPE"], col.array):
assert (row_a == row_b).all()
for code in ("PJ()", "QJ()"):
test(code)
@pytest.mark.skipif(
not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == "win32",
reason="https://github.com/numpy/numpy/issues/20699",
)
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column("test", format="J", array=np.arange(255))
c1 = fits.Column("A", format="PJ", array=arr1)
c2 = fits.Column("B", format="PJ", array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp("test.fits"), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM2"] == "PJ(255)"
assert h[2].header["TFORM2"] == "PJ(255)"
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp("test.fits")) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp("test2.fits"))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp("test2.fits"), mode="append") as new_hdul:
for _ in range(2):
with fits.open(self.temp("test.fits")) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp("test2.fits")) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
def test_vla_with_gap(self):
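# 'theap-gap.fits' uses the THEAP keyword to start the heap after a gap
# beyond the end of the main table data; reading the VLA column must honor
# that offset instead of assuming the heap begins immediately.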
hdul = fits.open(self.data("theap-gap.fits"))
data = hdul[1].data
assert data.shape == (500,)
assert data["i"][497] == 497
assert np.array_equal(data["arr"][497], [0, 1, 2, 3, 4])
hdul.close()
def test_tolist(self):
col = fits.Column(
name="var",
format="PI()",
array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col])
assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]]
assert hdu.data["var"].tolist() == [[1, 2, 3], [11, 12]]
def test_tolist_from_file(self):
filename = self.data("variable_length_table.fits")
with fits.open(filename) as hdul:
hdu = hdul[1]
assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]
assert hdu.data["var"].tolist() == [[45, 56], [11, 12, 13]]
@pytest.mark.skipif(sys.maxsize < 2**32, reason="requires 64-bit system")
@pytest.mark.skipif(sys.platform == "win32", reason="Cannot test on Windows")
@pytest.mark.hugemem
def test_heapsize_P_limit(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10812
Check if the error is raised when the heap size is bigger than what can be
indexed with a 32 bit signed int.
"""
# a matrix with variable length array elements is created
nelem = 2**28
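# (2**28 + 1) float64 elements occupy just over 2 GiB of heap space, which
# overflows the signed 32-bit offsets used by 'P' descriptors; writing should
# fail and point the user at the 64-bit 'Q' format instead.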
matrix = np.zeros(1, dtype=np.object_)
matrix[0] = np.arange(0.0, float(nelem + 1))
col = fits.Column(name="MATRIX", format=f"PD({nelem})", unit="", array=matrix)
t = fits.BinTableHDU.from_columns([col])
t.name = "MATRIX"
with pytest.raises(
ValueError, match="Please consider using the 'Q' format for your file."
):
t.writeto(self.temp("matrix.fits"))
def test_empty_vla_raw_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/12881
Check if empty vla are correctly read.
"""
columns = [
fits.Column(name="integer", format="B", array=(1, 2)),
fits.Column(name="empty", format="PJ", array=([], [])),
]
fits.BinTableHDU.from_columns(columns).writeto(self.temp("bug.fits"))
with fits.open(self.temp("bug.fits")) as hdu:
assert np.array_equal(
hdu[1].data["empty"],
[np.array([], dtype=np.int32), np.array([], dtype=np.int32)],
)
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized so
long as they are unambiguous ("unambiguous" is a stretch here, since
Numpy is case insensitive when parsing the format codes, but their
"proper" case is lower-case, so we accept that). In practice, any key
in the NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
c = fits.Column("TEST", np.dtype(recformat))
c.format == fitsformat
c = fits.Column("TEST", recformat)
c.format == fitsformat
c = fits.Column("TEST", fitsformat)
c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column("TEST", "I4")
assert c.format == "I4"
assert c.format.format == "I"
assert c.format.width == 4
c = fits.Column("TEST", "F15.8")
assert c.format == "F15.8"
assert c.format.format == "F"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "E15.8")
assert c.format.format == "E"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "D15.8")
assert c.format.format == "D"
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column("TEST", "F10.0")
assert c.format.format == "F"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "E10.0")
assert c.format.format == "E"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "D10.0")
assert c.format.format == "D"
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column("TEST", "I")
assert c.format == "I"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I", ascii=True)
assert c.format == "I10"
assert c.format.recformat == "i4"
# With specified widths, integer precision should be set appropriately
c = fits.Column("TEST", "I4", ascii=True)
assert c.format == "I4"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I9", ascii=True)
assert c.format == "I9"
assert c.format.recformat == "i4"
c = fits.Column("TEST", "I12", ascii=True)
assert c.format == "I12"
assert c.format.recformat == "i8"
c = fits.Column("TEST", "E")
assert c.format == "E"
assert c.format.recformat == "f4"
c = fits.Column("TEST", "E", ascii=True)
assert c.format == "E15.7"
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column("TEST", "F")
assert c.format == "F16.7"
c = fits.Column("TEST", "D")
assert c.format == "D"
assert c.format.recformat == "f8"
c = fits.Column("TEST", "D", ascii=True)
assert c.format == "D25.17"
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column("TEST", "F5.0", array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].header["TFORM1"] == "F5.0"
assert hdul[1].data["TEST"].dtype == np.dtype("float64")
assert np.all(hdul[1].data["TEST"] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, "TEST")
assert raw.tobytes() == b" 1. 2. 3."
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_new_coldefs_with_invalid_sequence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[("A", "<u4", (2,)), ("B", ">u2")])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs["A"].bzero
assert 2**15 == col_defs["B"].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(
UserWarning,
match=r"Field 2 has a repeat count " r"of 0 in its format code",
):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name="a", format="D")
b = fits.Column(name="b", format="D")
cols = fits.ColDefs([a, b])
assert cols["a"] == cols[0]
assert cols["b"] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns(
[fits.Column("a", format="D"), fits.Column("b", format="D")]
)
b = table.columns["b"]
table.columns.del_col("b")
assert table.data.dtype.names == ("a",)
b.name = "HELLO"
assert b.name == "HELLO"
assert "TTYPE2" not in table.header
assert table.header["TTYPE1"] == "a"
assert table.columns.names == ["a"]
with pytest.raises(KeyError):
table.columns["b"]
# Make sure updates to the remaining column still work
table.columns.change_name("a", "GOODBYE")
with pytest.raises(KeyError):
table.columns["a"]
assert table.columns["GOODBYE"].name == "GOODBYE"
assert table.data.dtype.names == ("GOODBYE",)
assert table.columns.names == ["GOODBYE"]
assert table.data.columns.names == ["GOODBYE"]
table.columns["GOODBYE"].name = "foo"
with pytest.raises(KeyError):
table.columns["GOODBYE"]
assert table.columns["foo"].name == "foo"
assert table.data.dtype.names == ("foo",)
assert table.columns.names == ["foo"]
assert table.data.columns.names == ["foo"]
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column("xcol", format="5X", array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column("pcol", format="PJ", array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column("qcol", format="QJ", array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
those that typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format="I", array=[1, 2, 3, 4, 5])
assert "Column name must be a string able to fit" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column(
"col",
format=0,
null="Nan",
disp=1,
coord_type=1,
coord_unit=2,
coord_inc="1",
time_ref_pos=1,
coord_ref_point="1",
coord_ref_value="1",
)
err_msgs = [
"keyword arguments to Column were invalid",
"TFORM",
"TNULL",
"TDISP",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
]
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with highest priority,
i.e. the order of error messages to be displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="B", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="-56", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(
err.value
)
@pytest.mark.parametrize(
"keys",
[
{"TFORM": "Z", "TDISP": "E"},
{"TFORM": "2", "TDISP": "2E"},
{"TFORM": 3, "TDISP": 6.3},
{"TFORM": float, "TDISP": np.float64},
{"TFORM": "", "TDISP": "E.5"},
],
)
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("col", format=keys["TFORM"], disp=keys["TDISP"])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name="a", array=x, format="E")
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header["TTYPE1"]
hdu.columns[0].name = "b"
def test_table_to_hdu():
from astropy.table import Table
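# Converting a Table to a BinTableHDU should carry column units into TUNITn
# cards (warning about units that don't parse as FITS units), copy the table
# metadata into the header, and merge in any explicitly supplied header cards.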
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = "m/s"
table["b"].unit = "not-a-unit"
table.meta["foo"] = "bar"
with pytest.warns(
UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.BinTableHDU(table, header=fits.Header({"TEST": 1}))
assert len(w) == 1
for name in "abc":
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2")
assert hdu.header["FOO"] == "bar"
assert hdu.header["TEST"] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)], dtype=[("x", float), ("y", int)]).view(
fits.FITS_rec
)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__; so test elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmp_path):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr["TUNIT1"] = "pixel"
hdr["TUNIT2"] = "m"
hdr["TUNIT3"] = "m"
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr["TCTYP2"] = "RA---TAN"
hdr["TCTYP3"] = "ANGLE"
hdr["TCRVL2"] = -999.0
hdr["TCRVL3"] = -999.0
hdr["TCRPX2"] = 1.0
hdr["TCRPX3"] = 1.0
hdr["TALEN2"] = 16384
hdr["TALEN3"] = 1024
hdr["TCUNI2"] = "angstrom"
hdr["TCUNI3"] = "deg"
# Other non-relevant keywords
hdr["RA"] = 1.5
hdr["DEC"] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith(
"The following keywords are now recognized as special"
)
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == "s"
assert hdu.columns[1].unit == "pixel"
assert hdu.columns[2].unit is None
assert hdu.header["TUNIT1"] == "s"
assert hdu.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert "TCTYP1" not in hdu.header
assert hdu.header["TCTYP2"] == "RA---TAN"
assert hdu.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu.header["RA"] == 1.5
assert hdu.header["DEC"] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
# cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmp_path / "test.fits"
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with warnings.catch_warnings(record=True) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == "s"
assert hdu2.columns[1].unit == "pixel"
assert hdu2.columns[2].unit is None
assert hdu2.header["TUNIT1"] == "s"
assert hdu2.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == "RA---TAN"
assert hdu2.columns[2].coord_type == "ANGLE"
assert "TCTYP1" not in hdu2.header
assert hdu2.header["TCTYP2"] == "RA---TAN"
assert hdu2.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu2.header["RA"] == 1.5
assert hdu2.header["DEC"] == 3.0
def test_empty_table(tmp_path):
ofile = tmp_path / "emptytable.fits"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
ofile = tmp_path / "emptytable.fits.gz"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
def test_a3dtable(tmp_path):
testfile = tmp_path / "test.fits"
hdu = fits.BinTableHDU.from_columns(
[fits.Column(name="FOO", format="J", array=np.arange(10))]
)
hdu.header["XTENSION"] = "A3DTABLE"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].header["XTENSION"] == "A3DTABLE"
with pytest.warns(AstropyUserWarning) as w:
hdul.verify("fix")
assert str(w[0].message) == "Verification reported errors:"
assert str(w[2].message).endswith("Converted the XTENSION keyword to BINTABLE.")
assert hdul[1].header["XTENSION"] == "BINTABLE"
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header["FOO"] = None
hdu.header.cards["FOO"]._value = np.nan
testfile = tmp_path / "test.fits"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / "invalid_unit.fits"
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = "1 / (MeV sr s)"
unit = Unit(invalid_unit)
t = Table({"a": [1, 2, 3]})
t.write(path)
with fits.open(path, mode="update") as hdul:
hdul[1].header["TUNIT1"] = invalid_unit
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t["a"].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict="silent")
assert isinstance(t["a"].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict="raise")
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict="warn")
|
4282bd7f8842f9239f5491dfdd733c2ab71cf0b18b630356933fa3456514245b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some methods related to ``CDS`` format
reader/writer.
Requires `pyyaml <https://pyyaml.org/>`_ to be installed.
"""
from io import StringIO
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import ascii
from astropy.table import Column, MaskedColumn, Table
from astropy.time import Time
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyWarning
from .common import assert_almost_equal
test_dat = [
"names e d s i",
"HD81809 1E-7 22.25608 +2 67",
"HD103095 -31.6e5 +27.2500 -9E34 -30",
]
def test_roundtrip_mrt_table():
"""
Tests whether the CDS writer can roundtrip a table, i.e. read a table
into a ``Table`` object and write it back to a file exactly as it is.
Since the CDS writer presently uses an MRT format template, only the
Byte-By-Byte and data sections of the table can be compared between the
original and the newly written table.
Further, the CDS reader does not have the capability to recognize column
formats from the header of a CDS/MRT table, so this test only works for a
limited set of simple tables which don't have whitespace in the column
values or mix-in columns. Because of this, the written table output cannot
be matched directly against the original file and has to be checked
against a list of lines.
Masked columns are read properly though, and thus are being tested
during round-tripping.
The difference between ``cdsFunctional2.dat`` file and ``exp_output``
is the following:
* Metadata is different because MRT template is used for writing.
* Spacing between ``Label`` and ``Explanations`` column in the
Byte-By-Byte.
* Units are written as ``[cm.s-2]`` and not ``[cm/s2]``, since both
are valid according to CDS/MRT standard.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 7 A7 --- ID Star ID ",
" 9-12 I4 K Teff [4337/4654] Effective temperature ",
"14-17 F4.2 [cm.s-2] logg [0.77/1.28] Surface gravity ",
"19-22 F4.2 km.s-1 vturb [1.23/1.82] Micro-turbulence velocity",
"24-28 F5.2 [-] [Fe/H] [-2.11/-1.5] Metallicity ",
"30-33 F4.2 [-] e_[Fe/H] ? rms uncertainty on [Fe/H] ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"S05-5 4337 0.77 1.80 -2.07 ",
"S08-229 4625 1.23 1.23 -1.50 ",
"S05-10 4342 0.91 1.82 -2.11 0.14",
"S05-47 4654 1.28 1.74 -1.64 0.16",
]
dat = get_pkg_data_filename(
"data/cdsFunctional2.dat", package="astropy.io.ascii.tests"
)
t = Table.read(dat, format="ascii.mrt")
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
assert lines == exp_output
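# A minimal sketch of the same write/read cycle with an in-memory table instead
# of the packaged data file; the column names and values here are made up.
def _mrt_roundtrip_sketch():
    t = Table({"ID": ["S05-5", "S08-229"], "Teff": [4337, 4625]})
    buf = StringIO()
    t.write(buf, format="ascii.mrt")
    # The MRT output can then be read back with the CDS reader, as is done
    # elsewhere in this module.
    return ascii.read(buf.getvalue(), format="cds")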
def test_write_byte_by_byte_units():
t = ascii.read(test_dat)
col_units = [None, u.C, u.kg, u.m / u.s, u.year]
t._set_column_attribute("unit", col_units)
# Add a column with magnitude units.
# Note that magnitude has to be assigned for each value explicitly.
t["magnitude"] = [u.Magnitude(25), u.Magnitude(-9)]
col_units.append(u.mag)
out = StringIO()
t.write(out, format="ascii.mrt")
# Read written table.
tRead = ascii.read(out.getvalue(), format="cds")
assert [tRead[col].unit for col in tRead.columns] == col_units
def test_write_readme_with_default_options():
exp_output = [
"Title:",
"Authors:",
"Table:",
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [-3160000.0/0.01] Description of e",
"16-23 F8.5 --- d [22.25/27.25] Description of d ",
"25-31 E7.1 --- s [-9e+34/2.0] Description of s ",
"33-35 I3 --- i [-30/67] Description of i ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 22.25608 2e+00 67",
"HD103095 -3e+06 27.25000 -9e+34 -30",
]
t = ascii.read(test_dat)
out = StringIO()
t.write(out, format="ascii.mrt")
assert out.getvalue().splitlines() == exp_output
def test_write_empty_table():
out = StringIO()
with pytest.raises(NotImplementedError):
Table().write(out, format="ascii.mrt")
def test_write_null_data_values():
exp_output = [
"HD81809 1e-07 22.25608 2.0e+00 67",
"HD103095 -3e+06 27.25000 -9.0e+34 -30",
"Sun 5.3e+27 ",
]
t = ascii.read(test_dat)
t.add_row(
["Sun", "3.25", "0", "5.3e27", "2"], mask=[False, True, True, False, True]
)
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
lines = lines[i_secs[-1] + 1 :] # Last section is the data.
assert lines == exp_output
def test_write_byte_by_byte_for_masked_column():
"""
This test differs from the ``test_write_null_data_values``
above in that it tests the column value limits in the Byte-By-Byte
description section for columns whose values are masked.
It also checks the description for columns whose values are all the same.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [0.0/0.01]? Description of e ",
"16-17 F2.0 --- d ? Description of d ",
"19-25 E7.1 --- s [-9e+34/2.0] Description of s ",
"27-29 I3 --- i [-30/67] Description of i ",
"31-33 F3.1 --- sameF [5.0/5.0] Description of sameF",
"35-36 I2 --- sameI [20] Description of sameI ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 2e+00 67 5.0 20",
"HD103095 -9e+34 -30 5.0 20",
]
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name="sameF")
t.add_column([20, 20], name="sameI")
t["e"] = MaskedColumn(t["e"], mask=[False, True])
t["d"] = MaskedColumn(t["d"], mask=[True, True])
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
assert lines == exp_output
exp_coord_cols_output = dict(
# fmt: off
generic=[
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'41-42 I2 --- sameI [20] Description of sameI ',
'44-45 I2 h RAh Right Ascension (hour) ',
'47-48 I2 min RAm Right Ascension (minute) ',
'50-62 F13.10 s RAs Right Ascension (second) ',
' 64 A1 --- DE- Sign of Declination ',
'65-66 I2 deg DEd Declination (degree) ',
'68-69 I2 arcmin DEm Declination (arcmin) ',
'71-82 F12.9 arcsec DEs Declination (arcsec) ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67 5.0 20 22 02 15.4500000000 -61 39 34.599996000',
'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 12 48 15.2244072000 +17 46 26.496624000',
],
positive_de=[
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'41-42 I2 --- sameI [20] Description of sameI ',
'44-45 I2 h RAh Right Ascension (hour) ',
'47-48 I2 min RAm Right Ascension (minute) ',
'50-62 F13.10 s RAs Right Ascension (second) ',
' 64 A1 --- DE- Sign of Declination ',
'65-66 I2 deg DEd Declination (degree) ',
'68-69 I2 arcmin DEm Declination (arcmin) ',
'71-82 F12.9 arcsec DEs Declination (arcsec) ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67 5.0 20 12 48 15.2244072000 +17 46 26.496624000',
'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 12 48 15.2244072000 +17 46 26.496624000',
],
# fmt: on
galactic=[
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [-3160000.0/0.01] Description of e",
"16-23 F8.5 --- d [22.25/27.25] Description of d ",
"25-31 E7.1 --- s [-9e+34/2.0] Description of s ",
"33-35 I3 --- i [-30/67] Description of i ",
"37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ",
"41-42 I2 --- sameI [20] Description of sameI ",
"44-59 F16.12 deg GLON Galactic Longitude ",
"61-76 F16.12 deg GLAT Galactic Latitude ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 22.25608 2e+00 67 5.0 20 330.071639591690 -45.548080484609",
"HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 330.071639591690 -45.548080484609",
],
ecliptic=[
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [-3160000.0/0.01] Description of e ",
"16-23 F8.5 --- d [22.25/27.25] Description of d ",
"25-31 E7.1 --- s [-9e+34/2.0] Description of s ",
"33-35 I3 --- i [-30/67] Description of i ",
"37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ",
"41-42 I2 --- sameI [20] Description of sameI ",
"44-59 F16.12 deg ELON Ecliptic Longitude (geocentrictrueecliptic)",
"61-76 F16.12 deg ELAT Ecliptic Latitude (geocentrictrueecliptic) ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 22.25608 2e+00 67 5.0 20 306.224208650096 -45.621789850825",
"HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 306.224208650096 -45.621789850825",
],
)
def test_write_coord_cols():
"""
There can only be one coordinate (``SkyCoord``) column in a single table,
because splitting such a column into its individual component columns
requires iterating over the table columns, which would have to be done
again if additional coordinate columns were present.
"""
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name="sameF")
t.add_column([20, 20], name="sameI")
# Coordinates of ASASSN-15lh
coord = SkyCoord(330.564375, -61.65961111, unit=u.deg)
# Coordinates of ASASSN-14li
coordp = SkyCoord(192.06343503, 17.77402684, unit=u.deg)
cols = [
Column([coord, coordp]), # Generic coordinate column
coordp, # Coordinate column with positive DEC
coord.galactic, # Galactic coordinates
coord.geocentrictrueecliptic, # Ecliptic coordinates
]
# Loop through different types of coordinate columns.
for col, coord_type in zip(cols, exp_coord_cols_output):
exp_output = exp_coord_cols_output[coord_type]
t["coord"] = col
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
# Check the written table.
assert lines == exp_output
# Check if the original table columns remains unmodified.
assert t.colnames == ["names", "e", "d", "s", "i", "sameF", "sameI", "coord"]
def test_write_byte_by_byte_bytes_col_format():
"""
Tests the alignment of Byte counts with respect to the hyphen
in the Bytes column of the Byte-By-Byte section. The whitespace around the
hyphen is governed by the number of digits in the total Byte
count. Single-Byte columns should have a single Byte count
without the hyphen.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-21 E12.6 --- e [-3160000.0/0.01] Description of e",
"23-30 F8.5 --- d [22.25/27.25] Description of d ",
"32-38 E7.1 --- s [-9e+34/2.0] Description of s ",
"40-42 I3 --- i [-30/67] Description of i ",
"44-46 F3.1 --- sameF [5.0/5.0] Description of sameF ",
"48-49 I2 --- sameI [20] Description of sameI ",
" 51 I1 --- singleByteCol [2] Description of singleByteCol ",
"53-54 I2 h RAh Right Ascension (hour) ",
"56-57 I2 min RAm Right Ascension (minute) ",
"59-71 F13.10 s RAs Right Ascension (second) ",
" 73 A1 --- DE- Sign of Declination ",
"74-75 I2 deg DEd Declination (degree) ",
"77-78 I2 arcmin DEm Declination (arcmin) ",
"80-91 F12.9 arcsec DEs Declination (arcsec) ",
"--------------------------------------------------------------------------------",
]
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name="sameF")
t.add_column([20, 20], name="sameI")
t["coord"] = SkyCoord(330.564375, -61.65961111, unit=u.deg)
t["singleByteCol"] = [2, 2]
t["e"].format = ".5E"
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
# Select only the Byte-By-Byte section.
lines = lines[i_secs[0] : i_secs[-2]]
lines.append("-" * 80) # Append a separator line.
assert lines == exp_output
def test_write_byte_by_byte_wrapping():
"""
Test line wrapping in the description column of the
Byte-By-Byte section of the ReadMe.
"""
exp_output = """\
================================================================================
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 8 A8 --- thisIsALongColumnLabel This is a tediously long
description. But they do sometimes
have them. Better to put extra
details in the notes. This is a
tediously long description. But they
do sometimes have them. Better to put
extra details in the notes.
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
--------------------------------------------------------------------------------
"""
t = ascii.read(test_dat)
t.remove_columns(["s", "i"])
description = (
"This is a tediously long description."
+ " But they do sometimes have them."
+ " Better to put extra details in the notes. "
)
t["names"].description = description * 2
t["names"].name = "thisIsALongColumnLabel"
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
# Select only the Byte-By-Byte section.
lines = lines[i_secs[0] : i_secs[-2]]
lines.append("-" * 80) # Append a separator line.
assert lines == exp_output.splitlines()
def test_write_mixin_and_broken_cols():
"""
Tests conversion to string values for ``mix-in`` columns other than
``SkyCoord`` and for columns with only partial ``SkyCoord`` values.
"""
# fmt: off
exp_output = [
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 7 A7 --- name Description of name ',
' 9- 74 A66 --- Unknown Description of Unknown',
' 76-114 A39 --- Unknown Description of Unknown',
'116-138 A23 --- Unknown Description of Unknown',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 <SkyCoord (ICRS): (ra, dec) in deg',
' (330.564375, -61.65961111)> (0.41342785, -0.23329341, -0.88014294) 2019-01-01 00:00:00.000', # noqa: E501
'random 12 (0.41342785, -0.23329341, -0.88014294) 2019-01-01 00:00:00.000', # noqa: E501
]
# fmt: on
t = Table()
t["name"] = ["HD81809"]
coord = SkyCoord(330.564375, -61.65961111, unit=u.deg)
t["coord"] = Column(coord)
t.add_row(["random", 12])
t["cart"] = coord.cartesian
t["time"] = Time("2019-1-1")
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
# Check the written table.
assert lines == exp_output
def test_write_extra_skycoord_cols():
"""
Tests output for cases when the table contains multiple ``SkyCoord`` columns.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 7 A7 --- name Description of name ",
" 9-10 I2 h RAh Right Ascension (hour) ",
"12-13 I2 min RAm Right Ascension (minute)",
"15-27 F13.10 s RAs Right Ascension (second)",
" 29 A1 --- DE- Sign of Declination ",
"30-31 I2 deg DEd Declination (degree) ",
"33-34 I2 arcmin DEm Declination (arcmin) ",
"36-47 F12.9 arcsec DEs Declination (arcsec) ",
"49-62 A14 --- coord2 Description of coord2 ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD4760 0 49 39.9000000000 +06 24 07.999200000 12.4163 6.407 ",
"HD81809 22 02 15.4500000000 -61 39 34.599996000 330.564 -61.66",
]
t = Table()
t["name"] = ["HD4760", "HD81809"]
t["coord1"] = SkyCoord([12.41625, 330.564375], [6.402222, -61.65961111], unit=u.deg)
t["coord2"] = SkyCoord([12.41630, 330.564400], [6.407, -61.66], unit=u.deg)
out = StringIO()
with pytest.warns(
UserWarning,
match=r"column 2 is being skipped with designation of a "
r"string valued column `coord2`",
):
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and following lines.
# Check the written table.
assert lines[:-2] == exp_output[:-2]
for a, b in zip(lines[-2:], exp_output[-2:]):
assert a[:18] == b[:18]
assert a[30:42] == b[30:42]
assert_almost_equal(
np.fromstring(a[2:], sep=" "), np.fromstring(b[2:], sep=" ")
)
def test_write_skycoord_with_format():
"""
Tests output with a custom ``formats`` setting for the ``SkyCoord`` seconds (``RAs``/``DEs``) columns.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 7 A7 --- name Description of name ",
" 9-10 I2 h RAh Right Ascension (hour) ",
"12-13 I2 min RAm Right Ascension (minute)",
"15-19 F5.2 s RAs Right Ascension (second)",
" 21 A1 --- DE- Sign of Declination ",
"22-23 I2 deg DEd Declination (degree) ",
"25-26 I2 arcmin DEm Declination (arcmin) ",
"28-31 F4.1 arcsec DEs Declination (arcsec) ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD4760 0 49 39.90 +06 24 08.0",
"HD81809 22 02 15.45 -61 39 34.6",
]
t = Table()
t["name"] = ["HD4760", "HD81809"]
t["coord"] = SkyCoord([12.41625, 330.564375], [6.402222, -61.65961111], unit=u.deg)
out = StringIO()
# This will raise a warning because `formats` is checked against the column names
# before the writer creates the final (component-split) list of columns.
with pytest.warns(
AstropyWarning,
match=r"The key.s. {'[RD][AE]s', '[RD][AE]s'} specified in "
r"the formats argument do not match a column name.",
):
t.write(out, format="ascii.mrt", formats={"RAs": "05.2f", "DEs": "04.1f"})
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and following lines.
# Check the written table.
assert lines == exp_output
|
b3de76b8281e30193c41a848d88f02f10f25977057cfcdcc5f89bc9d188fc63c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import io
import os
import re
from contextlib import nullcontext
from io import BytesIO
from textwrap import dedent
import numpy as np
import pytest
from numpy import ma
from astropy.io import ascii
from astropy.io.ascii.core import (
FastOptionsError,
InconsistentTableError,
ParameterError,
)
from astropy.io.ascii.fastbasic import (
FastBasic,
FastCommentedHeader,
FastCsv,
FastNoHeader,
FastRdb,
FastTab,
)
from astropy.table import MaskedColumn, Table
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyWarning
from .common import assert_almost_equal, assert_equal, assert_true
StringIO = lambda x: BytesIO(x.encode("ascii")) # noqa: E731
CI = os.environ.get("CI", False)
def assert_table_equal(t1, t2, check_meta=False, rtol=1.0e-15, atol=1.0e-300):
"""
Test equality of all columns in a table, with stricter tolerances for
float columns than the np.allclose default.
"""
assert_equal(len(t1), len(t2))
assert_equal(t1.colnames, t2.colnames)
if check_meta:
assert_equal(t1.meta, t2.meta)
for name in t1.colnames:
if len(t1) != 0:
assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)
if not isinstance(t1[name], MaskedColumn):
for i, el in enumerate(t1[name]):
try:
if not isinstance(el, str) and np.isnan(el):
assert_true(
not isinstance(t2[name][i], str) and np.isnan(t2[name][i])
)
elif isinstance(el, str):
assert_equal(el, t2[name][i])
else:
assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol)
except (TypeError, NotImplementedError):
pass # ignore for now
# Use this counter to create a unique filename for each file created in a test
# if this function is called more than once in a single test
_filename_counter = 0
def _read(
tmp_path,
table,
Reader=None,
format=None,
parallel=False,
check_meta=False,
**kwargs,
):
# make sure we have a newline so table can't be misinterpreted as a filename
global _filename_counter
table += "\n"
reader = Reader(**kwargs)
t1 = reader.read(table)
t2 = reader.read(StringIO(table))
t3 = reader.read(table.splitlines())
t4 = ascii.read(table, format=format, guess=False, **kwargs)
t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs)
assert_table_equal(t1, t2, check_meta=check_meta)
assert_table_equal(t2, t3, check_meta=check_meta)
assert_table_equal(t3, t4, check_meta=check_meta)
assert_table_equal(t4, t5, check_meta=check_meta)
if parallel:
if CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
t6 = ascii.read(
table, format=format, guess=False, fast_reader={"parallel": True}, **kwargs
)
assert_table_equal(t1, t6, check_meta=check_meta)
filename = tmp_path / f"table{_filename_counter}.txt"
_filename_counter += 1
with open(filename, "wb") as f:
f.write(table.encode("ascii"))
f.flush()
t7 = ascii.read(filename, format=format, guess=False, **kwargs)
if parallel:
t8 = ascii.read(
filename,
format=format,
guess=False,
fast_reader={"parallel": True},
**kwargs,
)
assert_table_equal(t1, t7, check_meta=check_meta)
if parallel:
assert_table_equal(t1, t8, check_meta=check_meta)
return t1
@pytest.fixture(scope="function")
def read_basic(tmp_path, request):
return functools.partial(_read, tmp_path, Reader=FastBasic, format="basic")
@pytest.fixture(scope="function")
def read_csv(tmp_path, request):
return functools.partial(_read, tmp_path, Reader=FastCsv, format="csv")
@pytest.fixture(scope="function")
def read_tab(tmp_path, request):
return functools.partial(_read, tmp_path, Reader=FastTab, format="tab")
@pytest.fixture(scope="function")
def read_commented_header(tmp_path, request):
return functools.partial(
_read, tmp_path, Reader=FastCommentedHeader, format="commented_header"
)
@pytest.fixture(scope="function")
def read_rdb(tmp_path, request):
return functools.partial(_read, tmp_path, Reader=FastRdb, format="rdb")
@pytest.fixture(scope="function")
def read_no_header(tmp_path, request):
return functools.partial(_read, tmp_path, Reader=FastNoHeader, format="no_header")
@pytest.mark.parametrize("delimiter", [",", "\t", " ", "csv"])
@pytest.mark.parametrize("quotechar", ['"', "'"])
@pytest.mark.parametrize("fast", [False, True])
def test_embedded_newlines(delimiter, quotechar, fast):
"""Test that embedded newlines are supported for io.ascii readers
and writers, both fast and Python readers."""
# Start with an assortment of values with different embedded newlines and whitespace
dat = [
["\t a ", " b \n cd ", "\n"],
[" 1\n ", '2 \n" \t 3\n4\n5', "1\n '2\n"],
[" x,y \nz\t", "\t 12\n\t34\t ", "56\t\n"],
]
dat = Table(dat, names=("a", "b", "c"))
# Construct a table which is our expected result of writing the table and
# reading it back. Certain stripping of whitespace is expected.
exp = {} # expected output from reading
for col in dat.itercols():
vals = []
for val in col:
# Readers and writers both strip whitespace from ends of values
val = val.strip(" \t")
if not fast:
# Pure Python reader has a "feature" where it strips trailing
# whitespace from each input line. This means a value like
# " x \ny \t\n" gets read as "x\ny".
bits = val.splitlines(keepends=True)
bits_out = []
for bit in bits:
bit = re.sub(r"[ \t]+(\n?)$", r"\1", bit.strip(" \t"))
bits_out.append(bit)
val = "".join(bits_out)
vals.append(val)
exp[col.info.name] = vals
exp = Table(exp)
if delimiter == "csv":
format = "csv"
delimiter = ","
else:
format = "basic"
# Write the table to `text`
fh = io.StringIO()
ascii.write(
dat,
fh,
format=format,
delimiter=delimiter,
quotechar=quotechar,
fast_writer=fast,
)
text = fh.getvalue()
# Read it back and compare to the expected
dat_out = ascii.read(
text,
format=format,
guess=False,
delimiter=delimiter,
quotechar=quotechar,
fast_reader=fast,
)
eq = dat_out.values_equal(exp)
assert all(np.all(col) for col in eq.itercols())
@pytest.mark.parametrize("parallel", [True, False])
def test_simple_data(parallel, read_basic):
"""
Make sure the fast reader works with basic input data.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
assert_table_equal(table, expected)
def test_read_types():
"""
Make sure that the read() function takes filenames,
strings, and lists of strings in addition to file-like objects.
"""
t1 = ascii.read("a b c\n1 2 3\n4 5 6", format="fast_basic", guess=False)
# TODO: also read from file
t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format="fast_basic", guess=False)
t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format="fast_basic", guess=False)
assert_table_equal(t1, t2)
assert_table_equal(t2, t3)
@pytest.mark.parametrize("parallel", [True, False])
def test_supplied_names(parallel, read_basic):
"""
If passed as a parameter, names should replace any
column names found in the header.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", names=("X", "Y", "Z"), parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("X", "Y", "Z"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header(parallel, read_basic, read_no_header):
"""
The header should not be read when header_start=None. Unless names is
passed, the column names should be auto-generated.
"""
# Cannot set header_start=None for basic format
with pytest.raises(ValueError):
read_basic(
"A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel
)
t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table(
[["A", "1", "4"], ["B", "2", "5"], ["C", "3", "6"]],
names=("col1", "col2", "col3"),
)
assert_table_equal(t2, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header_supplied_names(parallel, read_basic, read_no_header):
"""
If header_start=None and names is passed as a parameter, header
data should not be read and names should be used instead.
"""
table = read_no_header(
"A B C\n1 2 3\n4 5 6", names=("X", "Y", "Z"), parallel=parallel
)
expected = Table(
[["A", "1", "4"], ["B", "2", "5"], ["C", "3", "6"]], names=("X", "Y", "Z")
)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_comment(parallel, read_basic):
"""
Make sure that line comments are ignored by the C reader.
"""
table = read_basic(
"# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel
)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_lines(parallel, read_basic):
"""
Make sure that empty lines are ignored by the C reader.
"""
table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_lstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the beginning of fields.
"""
text = """
1, 2, \t3
A,\t\t B, C
a, b, c
\n"""
table = read_basic(text, delimiter=",", parallel=parallel)
expected = Table([["A", "a"], ["B", "b"], ["C", "c"]], names=("1", "2", "3"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_rstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the end of fields.
"""
text = " 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n"
table = read_basic(text, delimiter=",", parallel=parallel)
expected = Table([["A", "a"], ["B", "b"], ["C", "c"]], names=("1", "2", "3"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_conversion(parallel, read_basic):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. If that also fails, i.e. for
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = read_basic(text, parallel=parallel)
assert_equal(table["A"].dtype.kind, "f")
assert table["B"].dtype.kind in ("S", "U")
assert_equal(table["C"].dtype.kind, "i")
assert_equal(table["D"].dtype.kind, "f")
assert table["E"].dtype.kind in ("S", "U")
assert table["F"].dtype.kind in ("S", "U")
assert table["G"].dtype.kind in ("S", "U")
assert table["H"].dtype.kind in ("S", "U")
@pytest.mark.parametrize("parallel", [True, False])
def test_delimiter(parallel, read_basic):
"""
Make sure that different delimiters work as expected.
"""
text = dedent(
"""
COL1 COL2 COL3
1 A -1
2 B -2
"""
)
expected = Table([[1, 2], ["A", "B"], [-1, -2]], names=("COL1", "COL2", "COL3"))
for sep in " ,\t#;":
table = read_basic(text.replace(" ", sep), delimiter=sep, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_names(parallel, read_basic):
"""
If include_names is not None, the parser should read only those columns in include_names.
"""
table = read_basic(
"A B C D\n1 2 3 4\n5 6 7 8", include_names=["A", "D"], parallel=parallel
)
expected = Table([[1, 5], [4, 8]], names=("A", "D"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_exclude_names(parallel, read_basic):
"""
If exclude_names is not None, the parser should exclude the columns in exclude_names.
"""
table = read_basic(
"A B C D\n1 2 3 4\n5 6 7 8", exclude_names=["A", "D"], parallel=parallel
)
expected = Table([[2, 6], [3, 7]], names=("B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_exclude_names(parallel, read_basic):
"""
Make sure that include_names is applied before exclude_names if both are specified.
"""
text = dedent(
"""
A B C D E F G H
1 2 3 4 5 6 7 8
9 10 11 12 13 14 15 16
"""
)
table = read_basic(
text,
include_names=["A", "B", "D", "F", "H"],
exclude_names=["B", "F"],
parallel=parallel,
)
expected = Table([[1, 9], [4, 12], [8, 16]], names=("A", "D", "H"))
assert_table_equal(table, expected)
def test_doubled_quotes(read_csv):
"""
Test #8283 (fix for #8281), parsing doubled-quotes "ab""cd" in a quoted
field was incorrect.
"""
# fmt: off
tbl = '\n'.join(['a,b',
'"d""","d""q"',
'"""q",""""'])
expected = Table([['d"', '"q'],
['d"q', '"']],
names=('a', 'b'))
# fmt: on
dat = read_csv(tbl)
assert_table_equal(dat, expected)
# In addition to the local read_csv wrapper, check that default
# parsing with guessing gives the right answer.
for fast_reader in True, False:
dat = ascii.read(tbl, fast_reader=fast_reader)
assert_table_equal(dat, expected)
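# For reference, the doubled-quote convention exercised above matches the
# behaviour of the standard-library ``csv`` module; a small self-contained
# sketch, independent of the astropy readers:
def _stdlib_doubled_quotes_sketch():
    import csv
    rows = list(csv.reader(['a,b', '"d""","d""q"', '"""q",""""']))
    # rows == [['a', 'b'], ['d"', 'd"q'], ['"q', '"']]
    return rows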
@pytest.mark.filterwarnings(
"ignore:OverflowError converting to IntType in column TIMESTAMP"
)
def test_doubled_quotes_segv():
"""
Test the exact example from #8281 which resulted in SEGV prior to #8283
(in contrast to the tests above that just gave the wrong answer).
Attempts to produce a more minimal example were unsuccessful, so the whole
thing is included.
"""
tbl = dedent(
"""
"ID","TIMESTAMP","addendum_id","bib_reference","bib_reference_url","client_application","client_category","client_sort_key","color","coordsys","creator","creator_did","data_pixel_bitpix","dataproduct_subtype","dataproduct_type","em_max","em_min","format","hips_builder","hips_copyright","hips_creation_date","hips_creation_date_1","hips_creator","hips_data_range","hips_estsize","hips_frame","hips_glu_tag","hips_hierarchy","hips_initial_dec","hips_initial_fov","hips_initial_ra","hips_lon_asc","hips_master_url","hips_order","hips_order_1","hips_order_4","hips_order_min","hips_overlay","hips_pixel_bitpix","hips_pixel_cut","hips_pixel_scale","hips_progenitor_url","hips_publisher","hips_release_date","hips_release_date_1","hips_rgb_blue","hips_rgb_green","hips_rgb_red","hips_sampling","hips_service_url","hips_service_url_1","hips_service_url_2","hips_service_url_3","hips_service_url_4","hips_service_url_5","hips_service_url_6","hips_service_url_7","hips_service_url_8","hips_skyval","hips_skyval_method","hips_skyval_value","hips_status","hips_status_1","hips_status_2","hips_status_3","hips_status_4","hips_status_5","hips_status_6","hips_status_7","hips_status_8","hips_tile_format","hips_tile_format_1","hips_tile_format_4","hips_tile_width","hips_version","hipsgen_date","hipsgen_date_1","hipsgen_date_10","hipsgen_date_11","hipsgen_date_12","hipsgen_date_2","hipsgen_date_3","hipsgen_date_4","hipsgen_date_5","hipsgen_date_6","hipsgen_date_7","hipsgen_date_8","hipsgen_date_9","hipsgen_params","hipsgen_params_1","hipsgen_params_10","hipsgen_params_11","hipsgen_params_12","hipsgen_params_2","hipsgen_params_3","hipsgen_params_4","hipsgen_params_5","hipsgen_params_6","hipsgen_params_7","hipsgen_params_8","hipsgen_params_9","label","maxOrder","moc_access_url","moc_order","moc_release_date","moc_sky_fraction","obs_ack","obs_collection","obs_copyrigh_url","obs_copyright","obs_copyright_1","obs_copyright_url","obs_copyright_url_1","obs_description","obs_description_url","obs_descrition_url","obs_id","obs_initial_dec","obs_initial_fov","obs_initial_ra","obs_provenance","obs_regime","obs_title","ohips_frame","pixelCut","pixelRange","prov_did","prov_progenitor","prov_progenitor_url","publisher_did","publisher_id","s_pixel_scale","t_max","t_min"
"CDS/P/2MASS/H","1524123841000","","2006AJ....131.1163S","http://cdsbib.u-strasbg.fr/cgi-bin/cdsbib?2006AJ....131.1163S","AladinDesktop","Image/Infrared/2MASS","04-001-03","","","","ivo://CDS/P/2MASS/H","","","image","1.798E-6","1.525E-6","","Aladin/HipsGen v9.017","CNRS/Unistra","2013-05-06T20:36Z","","CDS (A.Oberto)","","","equatorial","","mean","","","","","","9","","","","","","0 60","2.236E-4","","","2016-04-22T13:48Z","","","","","","http://alasky.u-strasbg.fr/2MASS/H","https://irsa.ipac.caltech.edu/data/hips/CDS/2MASS/H","http://alaskybis.u-strasbg.fr/2MASS/H","https://alaskybis.u-strasbg.fr/2MASS/H","","","","","","","","","public master clonableOnce","public mirror unclonable","public mirror clonableOnce","public mirror clonableOnce","","","","","","jpeg fits","","","512","1.31","","","","","","","","","","","","","","","","","","","","","","","","","","","","","http://alasky.u-strasbg.fr/2MASS/H/Moc.fits","9","","1","University of Massachusetts & IPAC/Caltech","The Two Micron All Sky Survey - H band (2MASS H)","","University of Massachusetts & IPAC/Caltech","","http://www.ipac.caltech.edu/2mass/","","2MASS has uniformly scanned the entire sky in three near-infrared bands to detect and characterize point sources brighter than about 1 mJy in each band, with signal-to-noise ratio (SNR) greater than 10, using a pixel size of 2.0"". This has achieved an 80,000-fold improvement in sensitivity relative to earlier surveys. 2MASS used two highly-automated 1.3-m telescopes, one at Mt. Hopkins, AZ, and one at CTIO, Chile. Each telescope was equipped with a three-channel camera, each channel consisting of a 256x256 array of HgCdTe detectors, capable of observing the sky simultaneously at J (1.25 microns), H (1.65 microns), and Ks (2.17 microns). The University of Massachusetts (UMass) was responsible for the overall management of the project, and for developing the infrared cameras and on-site computing systems at both facilities. The Infrared Processing and Analysis Center (IPAC) is responsible for all data processing through the Production Pipeline, and construction and distribution of the data products. Funding is provided primarily by NASA and the NSF","","","","+0","0.11451621372724685","0","","Infrared","2MASS H (1.66um)","","","","","IPAC/NASA","","","","","51941","50600"
"""
)
ascii.read(tbl, format="csv", fast_reader=True, guess=False)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_fields(parallel, read_basic):
"""
The character quotechar (default '"') should denote the start of a field which can
contain the field delimiter and newlines.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = dedent(
"""
"A B" C D
1.5 2.1 -37.1
a b " c
d"
"""
)
table = read_basic(text, parallel=parallel)
expected = Table(
[["1.5", "a"], ["2.1", "b"], ["-37.1", "c\nd"]], names=("A B", "C", "D")
)
assert_table_equal(table, expected)
table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize(
"key,val",
[
("delimiter", ",,"), # multi-char delimiter
("comment", "##"), # multi-char comment
("data_start", None), # data_start=None
("data_start", -1), # data_start negative
("quotechar", "##"), # multi-char quote signifier
("header_start", -1), # negative header_start
(
"converters",
{i + 1: ascii.convert_numpy(np.uint) for i in range(3)},
), # passing converters
("Inputter", ascii.ContinuationLinesInputter), # passing Inputter
("header_Splitter", ascii.DefaultSplitter), # passing Splitter
("data_Splitter", ascii.DefaultSplitter),
],
)
def test_invalid_parameters(key, val):
"""
Make sure the C reader raises an error if passed parameters it can't handle.
"""
with pytest.raises(ParameterError):
FastBasic(**{key: val}).read("1 2 3\n4 5 6")
with pytest.raises(ParameterError):
ascii.read("1 2 3\n4 5 6", format="fast_basic", guess=False, **{key: val})
def test_invalid_parameters_other():
with pytest.raises(TypeError):
FastBasic(foo=7).read("1 2 3\n4 5 6") # unexpected argument
with pytest.raises(FastOptionsError): # don't fall back on the slow reader
ascii.read("1 2 3\n4 5 6", format="basic", fast_reader={"foo": 7})
with pytest.raises(ParameterError):
# Outputter cannot be specified in constructor
FastBasic(Outputter=ascii.TableOutputter).read("1 2 3\n4 5 6")
def test_too_many_cols1():
"""
If a row contains too many columns, the C reader should raise an error.
"""
text = dedent(
"""
A B C
1 2 3
4 5 6
7 8 9 10
11 12 13
"""
)
with pytest.raises(InconsistentTableError) as e:
FastBasic().read(text)
assert (
"Number of header columns (3) inconsistent with data columns in data line 2"
in str(e.value)
)
def test_too_many_cols2():
text = """\
aaa,bbb
1,2,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert (
"Number of header columns (2) inconsistent with data columns in data line 0"
in str(e.value)
)
def test_too_many_cols3():
text = """\
aaa,bbb
1,2,,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert (
"Number of header columns (2) inconsistent with data columns in data line 0"
in str(e.value)
)
def test_too_many_cols4():
# https://github.com/astropy/astropy/issues/9922
with pytest.raises(InconsistentTableError) as e:
ascii.read(
get_pkg_data_filename("data/conf_py.txt"), fast_reader=True, guess=True
)
assert "Unable to guess table format with the guesses listed below" in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_not_enough_cols(parallel, read_csv):
"""
If a row does not have enough columns, the FastCsv reader should add empty
fields while the FastBasic reader should raise an error.
"""
text = """
A,B,C
1,2,3
4,5
6,7,8
"""
table = read_csv(text, parallel=parallel)
assert table["B"][1] is not ma.masked
assert table["C"][1] is ma.masked
with pytest.raises(InconsistentTableError):
table = FastBasic(delimiter=",").read(text)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_end(parallel, read_basic, read_rdb):
"""
The parameter data_end should specify where data reading ends.
"""
text = """
A B C
1 2 3
4 5 6
7 8 9
10 11 12
"""
table = read_basic(text, data_end=3, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
assert_table_equal(table, expected)
# data_end supports negative indexing
table = read_basic(text, data_end=-2, parallel=parallel)
assert_table_equal(table, expected)
text = """
A\tB\tC
N\tN\tS
1\t2\ta
3\t4\tb
5\t6\tc
"""
# make sure data_end works with RDB
table = read_rdb(text, data_end=-1, parallel=parallel)
expected = Table([[1, 3], [2, 4], ["a", "b"]], names=("A", "B", "C"))
assert_table_equal(table, expected)
# positive index
table = read_rdb(text, data_end=3, parallel=parallel)
expected = Table([[1], [2], ["a"]], names=("A", "B", "C"))
assert_table_equal(table, expected)
# empty table if data_end is too small
table = read_rdb(text, data_end=1, parallel=parallel)
expected = Table([[], [], []], names=("A", "B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_inf_nan(parallel, read_basic):
"""
Test that inf and nan-like values are correctly parsed on all platforms.
Regression test for https://github.com/astropy/astropy/pull/3525
"""
text = dedent(
"""\
A
nan
+nan
-nan
inf
infinity
+inf
+infinity
-inf
-infinity
"""
)
expected = Table(
{
"A": [
np.nan,
np.nan,
np.nan,
np.inf,
np.inf,
np.inf,
np.inf,
-np.inf,
-np.inf,
]
}
)
table = read_basic(text, parallel=parallel)
assert table["A"].dtype.kind == "f"
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_values(parallel, read_basic):
"""
Make sure that the parameter fill_values works as intended. If fill_values
is not specified, the default behavior should be to convert '' to 0.
"""
text = """
A, B, C
, 2, nan
a, -999, -3.4
nan, 5, -9999
8, nan, 7.6e12
"""
table = read_basic(text, delimiter=",", parallel=parallel)
# The empty value in column A should become a masked '0'
assert isinstance(table["A"], MaskedColumn)
assert table["A"][0] is ma.masked
# '0' rather than 0 because there is a string in the column
assert_equal(table["A"].data.data[0], "0")
assert table["A"][1] is not ma.masked
table = read_basic(
text, delimiter=",", fill_values=("-999", "0"), parallel=parallel
)
assert isinstance(table["B"], MaskedColumn)
assert table["A"][0] is not ma.masked # empty value unaffected
assert table["C"][2] is not ma.masked # -9999 is not an exact match
assert table["B"][1] is ma.masked
# Numeric because the rest of the column contains numeric data
assert_equal(table["B"].data.data[1], 0.0)
assert table["B"][0] is not ma.masked
table = read_basic(text, delimiter=",", fill_values=[], parallel=parallel)
# None of the columns should be masked
for name in "ABC":
assert not isinstance(table[name], MaskedColumn)
table = read_basic(
text,
delimiter=",",
fill_values=[("", "0", "A"), ("nan", "999", "A", "C")],
parallel=parallel,
)
assert np.isnan(table["B"][3]) # nan filling skips column B
# should skip masking as well as replacing nan
assert table["B"][3] is not ma.masked
assert table["A"][0] is ma.masked
assert table["A"][2] is ma.masked
assert_equal(table["A"].data.data[0], "0")
assert_equal(table["A"].data.data[2], "999")
assert table["C"][0] is ma.masked
assert_almost_equal(table["C"].data.data[0], 999.0)
assert_almost_equal(table["C"][1], -3.4) # column is still of type float
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_include_exclude_names(parallel, read_csv):
"""
fill_include_names and fill_exclude_names should filter missing/empty value handling
in the same way that include_names and exclude_names filter output columns.
"""
text = """
A, B, C
, 1, 2
3, , 4
5, 5,
"""
table = read_csv(text, fill_include_names=["A", "B"], parallel=parallel)
assert table["A"][0] is ma.masked
assert table["B"][1] is ma.masked
assert table["C"][2] is not ma.masked # C not in fill_include_names
table = read_csv(text, fill_exclude_names=["A", "B"], parallel=parallel)
assert table["C"][2] is ma.masked
assert table["A"][0] is not ma.masked
assert table["B"][1] is not ma.masked # A and B excluded from fill handling
table = read_csv(
text, fill_include_names=["A", "B"], fill_exclude_names=["B"], parallel=parallel
)
assert table["A"][0] is ma.masked
# fill_exclude_names applies after fill_include_names
assert table["B"][1] is not ma.masked
assert table["C"][2] is not ma.masked
@pytest.mark.parametrize("parallel", [True, False])
def test_many_rows(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of rows
is large (so that each column string is longer than INITIAL_COL_SIZE).
"""
text = "A B C\n"
for i in range(500): # create 500 rows
text += " ".join([str(i) for i in range(3)])
text += "\n"
table = read_basic(text, parallel=parallel)
expected = Table([[0] * 500, [1] * 500, [2] * 500], names=("A", "B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_many_columns(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of columns
is large (so that each header string is longer than INITIAL_HEADER_SIZE).
"""
# create a string with 500 columns and two data rows
text = " ".join([str(i) for i in range(500)])
text += "\n" + text + "\n" + text
table = read_basic(text, parallel=parallel)
expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])
assert_table_equal(table, expected)
def test_fast_reader():
"""
Make sure that ascii.read() works as expected by default and with
fast_reader specified.
"""
text = "a b c\n1 2 3\n4 5 6"
with pytest.raises(ParameterError): # C reader can't handle regex comment
ascii.read(text, format="fast_basic", guess=False, comment="##")
# Enable multiprocessing and the fast converter
try:
ascii.read(
text,
format="basic",
guess=False,
fast_reader={"parallel": True, "use_fast_converter": True},
)
except NotImplementedError:
# Might get this on Windows, try without parallel...
if os.name == "nt":
ascii.read(
text,
format="basic",
guess=False,
fast_reader={"parallel": False, "use_fast_converter": True},
)
else:
raise
# Should raise an error if fast_reader has an invalid key
with pytest.raises(FastOptionsError):
ascii.read(text, format="fast_basic", guess=False, fast_reader={"foo": True})
# Use the slow reader instead
ascii.read(text, format="basic", guess=False, comment="##", fast_reader=False)
# Will try the slow reader afterwards by default
ascii.read(text, format="basic", guess=False, comment="##")
@pytest.mark.parametrize("parallel", [True, False])
def test_read_tab(parallel, read_tab):
"""
The fast reader for tab-separated values should not strip whitespace, unlike
the basic reader.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t '
table = read_tab(text, parallel=parallel)
assert_equal(table["1"][0], " a") # preserve line whitespace
assert_equal(table["2"][0], " b ") # preserve field whitespace
assert table["3"][0] is ma.masked # empty value should be masked
assert_equal(table["2"][1], " d\n e") # preserve whitespace in quoted fields
assert_equal(table["3"][1], " ") # preserve end-of-line whitespace
@pytest.mark.parametrize("parallel", [True, False])
def test_default_data_start(parallel, read_basic):
"""
If data_start is not explicitly passed to read(), data processing should
begin right after the header.
"""
text = "ignore this line\na b c\n1 2 3\n4 5 6"
table = read_basic(text, header_start=1, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("a", "b", "c"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_commented_header(parallel, read_commented_header):
"""
The FastCommentedHeader reader should mimic the behavior of the
CommentedHeader by overriding the default header behavior of FastBasic.
"""
text = """
# A B C
1 2 3
4 5 6
"""
t1 = read_commented_header(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
assert_table_equal(t1, expected)
text = "# first commented line\n # second commented line\n\n" + text
t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel)
assert_table_equal(t2, expected)
# negative indexing allowed
t3 = read_commented_header(text, header_start=-1, data_start=0, parallel=parallel)
assert_table_equal(t3, expected)
text += "7 8 9"
t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel)
expected = Table([[7], [8], [9]], names=("A", "B", "C"))
assert_table_equal(t4, expected)
with pytest.raises(ParameterError):
# data_start cannot be negative
read_commented_header(text, header_start=-1, data_start=-1, parallel=parallel)
@pytest.mark.parametrize("parallel", [True, False])
def test_rdb(parallel, read_rdb):
"""
Make sure the FastRdb reader works as expected.
"""
text = """
A\tB\tC
1n\tS\t4N
1\t 9\t4.3
"""
table = read_rdb(text, parallel=parallel)
expected = Table([[1], [" 9"], [4.3]], names=("A", "B", "C"))
assert_table_equal(table, expected)
assert_equal(table["A"].dtype.kind, "i")
assert table["B"].dtype.kind in ("S", "U")
assert_equal(table["C"].dtype.kind, "f")
with pytest.raises(ValueError) as e:
text = "A\tB\tC\nN\tS\tN\n4\tb\ta" # C column contains non-numeric data
read_rdb(text, parallel=parallel)
assert "Column C failed to convert" in str(e.value)
with pytest.raises(ValueError) as e:
text = "A\tB\tC\nN\tN\n1\t2\t3" # not enough types specified
read_rdb(text, parallel=parallel)
assert "mismatch between number of column names and column types" in str(e.value)
with pytest.raises(ValueError) as e:
text = "A\tB\tC\nN\tN\t5\n1\t2\t3" # invalid type for column C
read_rdb(text, parallel=parallel)
assert "type definitions do not all match [num](N|S)" in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_start(parallel, read_basic):
"""
Make sure that data parsing begins at data_start (ignoring empty and
commented lines but not taking quoted values into account).
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = """
A B C
1 2 3
4 5 6
7 8 "9
1"
# comment
10 11 12
"""
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table(
[[4, 7, 10], [5, 8, 11], ["6", "9\n1", "12"]], names=("A", "B", "C")
)
assert_table_equal(table, expected)
table = read_basic(text, data_start=3, parallel=parallel)
# ignore empty line
expected = Table([[7, 10], [8, 11], ["9\n1", "12"]], names=("A", "B", "C"))
assert_table_equal(table, expected)
with pytest.raises(InconsistentTableError) as e:
# tries to begin in the middle of quoted field
read_basic(text, data_start=4, parallel=parallel)
assert "header columns (3) inconsistent with data columns in data line 0" in str(
e.value
)
table = read_basic(text, data_start=5, parallel=parallel)
# ignore commented line
expected = Table([[10], [11], [12]], names=("A", "B", "C"))
assert_table_equal(table, expected)
text = """
A B C
1 2 3
4 5 6
7 8 9
# comment
10 11 12
"""
# make sure reading works as expected in parallel
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=("A", "B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_empty_values(parallel, read_basic):
"""
Quoted empty values spanning multiple lines should be treated correctly.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = 'a b c\n1 2 " \n "'
table = read_basic(text, parallel=parallel)
assert table["c"][0] == "\n" # empty value masked by default
@pytest.mark.parametrize("parallel", [True, False])
def test_csv_comment_default(parallel, read_csv):
"""
Unless the comment parameter is specified, the CSV reader should
not treat any lines as comments.
"""
text = "a,b,c\n#1,2,3\n4,5,6"
table = read_csv(text, parallel=parallel)
expected = Table([["#1", "4"], [2, 5], [3, 6]], names=("a", "b", "c"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_whitespace_before_comment(parallel, read_tab):
"""
Readers that don't strip whitespace from data (Tab, RDB)
should still treat lines with leading whitespace and then
the comment char as comment lines.
"""
text = "a\tb\tc\n # comment line\n1\t2\t3"
table = read_tab(text, parallel=parallel)
expected = Table([[1], [2], [3]], names=("a", "b", "c"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_strip_line_trailing_whitespace(parallel, read_basic):
"""
Readers that strip whitespace from lines should ignore
trailing whitespace after the last data value of each
row.
"""
text = "a b c\n1 2 \n3 4 5"
with pytest.raises(InconsistentTableError) as e:
ascii.read(StringIO(text), format="fast_basic", guess=False)
assert "header columns (3) inconsistent with data columns in data line 0" in str(
e.value
)
text = "a b c\n 1 2 3 \t \n 4 5 6 "
table = read_basic(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("a", "b", "c"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_data(parallel, read_basic):
"""
As long as column names are supplied, the C reader
should return an empty table in the absence of data.
"""
table = read_basic("a b c", parallel=parallel)
expected = Table([[], [], []], names=("a", "b", "c"))
assert_table_equal(table, expected)
table = read_basic("a b c\n1 2 3", data_start=2, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_line_endings(parallel, read_basic, read_commented_header, read_rdb):
"""
Make sure the fast reader accepts CR and CR+LF
as newlines.
"""
text = "a b c\n1 2 3\n4 5 6\n7 8 9\n"
expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=("a", "b", "c"))
for newline in ("\r\n", "\r"):
table = read_basic(text.replace("\n", newline), parallel=parallel)
assert_table_equal(table, expected)
# Make sure the splitlines() method of FileString
# works with CR/CR+LF line endings
text = "#" + text
for newline in ("\r\n", "\r"):
table = read_commented_header(text.replace("\n", newline), parallel=parallel)
assert_table_equal(table, expected)
expected = Table(
[MaskedColumn([1, 4, 7]), [2, 5, 8], MaskedColumn([3, 6, 9])],
names=("a", "b", "c"),
)
expected["a"][0] = np.ma.masked
expected["c"][0] = np.ma.masked
text = "a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n"
for newline in ("\r\n", "\r"):
table = read_rdb(text.replace("\n", newline), parallel=parallel)
assert_table_equal(table, expected)
assert np.all(table == expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_store_comments(parallel, read_basic):
"""
Make sure that the output Table produced by the fast
reader stores any comment lines in its meta attribute.
"""
text = """
# header comment
a b c
# comment 2
# comment 3
1 2 3
4 5 6
"""
table = read_basic(text, parallel=parallel, check_meta=True)
assert_equal(table.meta["comments"], ["header comment", "comment 2", "comment 3"])
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_quotes(parallel, read_basic):
"""
Make sure the C reader doesn't segfault when the
input data contains empty quotes. [#3407]
"""
table = read_basic('a b\n1 ""\n2 ""', parallel=parallel)
expected = Table([[1, 2], [0, 0]], names=("a", "b"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fast_tab_with_names(parallel, read_tab):
"""
Make sure the C reader doesn't segfault when the header for the
first column is missing [#3545]
"""
content = """#
\tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot
-3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t""" # noqa: E501
head = [f"A{i}" for i in range(28)]
read_tab(content, data_start=1, parallel=parallel, names=head)
@pytest.mark.hugemem
def test_read_big_table(tmp_path):
"""Test reading of a huge file.
This test generates a huge CSV file (~2.3 GB) before reading it (see
https://github.com/astropy/astropy/pull/5319). The test is run only if the
``--run-hugemem`` CLI option is given. Note that running the test requires
quite a lot of memory (~18 GB when reading the file)!
"""
NB_ROWS = 250000
NB_COLS = 500
filename = tmp_path / "big_table.csv"
print(f"Creating a {NB_ROWS} rows table ({NB_COLS} columns).")
data = np.random.random(NB_ROWS)
t = Table(data=[data] * NB_COLS, names=[str(i) for i in range(NB_COLS)])
data = None
print(f"Saving the table to {filename}")
t.write(filename, format="ascii.csv", overwrite=True)
t = None
print(
"Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header)."
)
with open(filename) as f:
assert sum(1 for line in f) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format="ascii.csv", fast_reader=True)
assert len(t) == NB_ROWS
@pytest.mark.hugemem
def test_read_big_table2(tmp_path):
"""Test reading of a file with a huge column."""
# (2**32 // 2) : max value for a signed 32-bit int
# // 10 : we use a value for rows that have 10 chars (1e9)
# + 5 : add a few lines so the total length cannot be stored in a 32-bit int
NB_ROWS = 2**32 // 2 // 10 + 5
filename = tmp_path / "big_table.csv"
print(f"Creating a {NB_ROWS} rows table.")
data = np.full(NB_ROWS, int(1e9), dtype=np.int32)
t = Table(data=[data], names=["a"], copy=False)
print(f"Saving the table to {filename}")
t.write(filename, format="ascii.csv", overwrite=True)
t = None
print(
"Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header)."
)
with open(filename) as f:
assert sum(1 for line in f) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format="ascii.csv", fast_reader=True)
assert len(t) == NB_ROWS
# Test these both with guessing turned on and off
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize(
"fast_reader",
[False, dict(use_fast_converter=False), dict(use_fast_converter=True)],
)
@pytest.mark.parametrize("parallel", [False, True])
def test_data_out_of_range(parallel, fast_reader, guess):
"""
Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|)
shall be returned as 0 and +-inf respectively by the C parser, just like
the Python parser.
Test fast converter only to nominal accuracy.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.0e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader["parallel"] = parallel
if fast_reader.get("use_fast_converter"):
rtol = 1.0e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
test_for_warnings = fast_reader and not parallel
if not parallel and not fast_reader:
ctx = nullcontext()
else:
ctx = pytest.warns()
fields = ["10.1E+199", "3.14e+313", "2048e+306", "0.6E-325", "-2.e345"]
values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf])
# NOTE: Warning behavior varies for the parameters being passed in.
with ctx as w:
t = ascii.read(
StringIO(" ".join(fields)),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
if test_for_warnings: # Assert precision warnings for cols 2-5
assert len(w) == 4
for i in range(len(w)):
assert f"OverflowError converting to FloatType in column col{i+2}" in str(
w[i].message
)
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324)
# Test some additional corner cases
fields = [
".0101E202",
"0.000000314E+314",
"1777E+305",
"-1799E+305",
"0.2e-323",
"5200e-327",
" 0.0000000000000000000001024E+330",
]
values = np.array(
[1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308]
)
with ctx as w:
t = ascii.read(
StringIO(" ".join(fields)),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
if test_for_warnings: # Assert precision warnings for cols 4-6
assert len(w) == 3
for i in range(len(w)):
assert f"OverflowError converting to FloatType in column col{i+4}" in str(
w[i].message
)
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324)
# Test corner cases again with non-standard exponent_style (auto-detection)
if fast_reader and fast_reader.get("use_fast_converter"):
fast_reader.update({"exponent_style": "A"})
else:
pytest.skip("Fortran exponent style only available in fast converter")
fields = [
".0101D202",
"0.000000314d+314",
"1777+305",
"-1799E+305",
"0.2e-323",
"2500-327",
" 0.0000000000000000000001024Q+330",
]
with ctx as w:
t = ascii.read(
StringIO(" ".join(fields)),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
if test_for_warnings:
assert len(w) == 3
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324)
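# Minimal sketch, not part of the test suite (assumes a 64-bit build, as the
# test above does): with the fast reader, a decimal exponent beyond the
# float64 range parses to +/-inf rather than raising, possibly with a
# precision warning along the way.
def _example_overflow_to_inf():
    from io import StringIO
    import numpy as np
    from astropy.io import ascii
    t = ascii.read(StringIO("2.5e400 -3e400"), format="no_header", fast_reader=True)
    return np.isinf(t["col1"][0]) and np.isinf(t["col2"][0])  # expected True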
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize(
"fast_reader",
[False, dict(use_fast_converter=False), dict(use_fast_converter=True)],
)
@pytest.mark.parametrize("parallel", [False, True])
def test_data_at_range_limit(parallel, fast_reader, guess):
"""
Test parsing of fixed-format float64 numbers near the range limits
(|~4.94e-324 to 1.7977e+308|). Within the limit for full precision
(|~2.5e-307| for the strtod C parser, about a factor of 10 better for the
fast converter) exact numbers shall be returned; beyond that an Overflow
warning is raised. Input of exactly 0.0 must not raise an OverflowError.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.0e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader["parallel"] = parallel
if fast_reader.get("use_fast_converter"):
rtol = 1.0e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
# Test very long fixed-format strings (to strtod range limit w/o Overflow)
for D in 99, 202, 305:
t = ascii.read(
StringIO(99 * "0" + "." + D * "0" + "1"),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
assert_almost_equal(t["col1"][0], 10.0 ** -(D + 1), rtol=rtol, atol=1.0e-324)
for D in 99, 202, 308:
t = ascii.read(
StringIO("1" + D * "0" + ".0"),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
assert_almost_equal(t["col1"][0], 10.0**D, rtol=rtol, atol=1.0e-324)
# 0.0 is always exact (no Overflow warning)!
for s in "0.0", "0.0e+0", 399 * "0" + "." + 365 * "0":
t = ascii.read(
StringIO(s), format="no_header", guess=guess, fast_reader=fast_reader
)
assert t["col1"][0] == 0.0
# Test OverflowError at precision limit with laxer rtol
if parallel:
pytest.skip("Catching warnings broken in parallel mode")
elif not fast_reader:
pytest.skip("Python/numpy reader does not raise on Overflow")
with pytest.warns() as warning_lines:
t = ascii.read(
StringIO("0." + 314 * "0" + "1"),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
n_warns = len(warning_lines)
assert n_warns in (0, 1), f"Expected 0 or 1 warning, found {n_warns}"
if n_warns == 1:
assert (
"OverflowError converting to FloatType in column col1, possibly "
"resulting in degraded precision" in str(warning_lines[0].message)
)
assert_almost_equal(t["col1"][0], 1.0e-315, rtol=1.0e-10, atol=1.0e-324)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_int_out_of_range(parallel, guess):
"""
Integer numbers outside int range shall be returned as string columns
consistent with the standard (Python) parser (no 'upcasting' to float).
"""
imin = np.iinfo(int).min + 1
imax = np.iinfo(int).max - 1
huge = f"{imax+2:d}"
text = f"P M S\n {imax:d} {imin:d} {huge:s}"
expected = Table([[imax], [imin], [huge]], names=("P", "M", "S"))
# NOTE: Warning behavior varies for the parameters being passed in.
with pytest.warns() as w:
table = ascii.read(
text, format="basic", guess=guess, fast_reader={"parallel": parallel}
)
if not parallel:
assert len(w) == 1
assert (
"OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message)
)
assert_table_equal(table, expected)
# Check with leading zeroes to make sure strtol does not read them as octal
text = f"P M S\n000{imax:d} -0{-imin:d} 00{huge:s}"
expected = Table([[imax], [imin], ["00" + huge]], names=("P", "M", "S"))
with pytest.warns() as w:
table = ascii.read(
text, format="basic", guess=guess, fast_reader={"parallel": parallel}
)
if not parallel:
assert len(w) == 1
assert (
"OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message)
)
assert_table_equal(table, expected)
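# Hedged illustration, not part of the test suite: a value above the int64
# range makes its whole column fall back to strings rather than being upcast
# to float; an overflow warning is emitted while reading.
def _example_int_overflow_to_string():
    from astropy.io import ascii
    text = "a b\n1 99999999999999999999"
    t = ascii.read(text, format="basic", fast_reader=True)
    return t["a"].dtype.kind, t["b"].dtype.kind  # ("i", "U") expected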
@pytest.mark.parametrize("guess", [True, False])
def test_int_out_of_order(guess):
"""
Mixed columns should be returned as float, but if the out-of-range integer
shows up first, it will produce a string column - with both readers.
Broken with the parallel fast_reader.
"""
imax = np.iinfo(int).max - 1
text = f"A B\n 12.3 {imax:d}0\n {imax:d}0 45.6e7"
expected = Table([[12.3, 10.0 * imax], [f"{imax:d}0", "45.6e7"]], names=("A", "B"))
with pytest.warns(
AstropyWarning,
match=r"OverflowError converting to "
r"IntType in column B, reverting to String",
):
table = ascii.read(text, format="basic", guess=guess, fast_reader=True)
assert_table_equal(table, expected)
with pytest.warns(
AstropyWarning,
match=r"OverflowError converting to "
r"IntType in column B, reverting to String",
):
table = ascii.read(text, format="basic", guess=guess, fast_reader=False)
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_reader(parallel, guess):
"""
Make sure that ascii.read() can read Fortran-style exponential notation
using the fast_reader.
"""
# Check for nominal np.float64 precision
rtol = 1.0e-15
atol = 0.0
text = (
"A B C D\n100.01{:s}99 2.0 2.0{:s}-103 3\n"
+ " 4.2{:s}-1 5.0{:s}-1 0.6{:s}4 .017{:s}+309"
)
expc = Table(
[[1.0001e101, 0.42], [2, 0.5], [2.0e-103, 6.0e3], [3, 1.7e307]],
names=("A", "B", "C", "D"),
)
expstyles = {
"e": 6 * "E",
"D": ("D", "d", "d", "D", "d", "D"),
"Q": 3 * ("q", "Q"),
"Fortran": ("E", "0", "D", "Q", "d", "0"),
}
# C strtod (not-fast converter) can't handle Fortran exp
with pytest.raises(FastOptionsError) as e:
ascii.read(
text.format(*(6 * "D")),
format="basic",
guess=guess,
fast_reader={
"use_fast_converter": False,
"parallel": parallel,
"exponent_style": "D",
},
)
assert "fast_reader: exponent_style requires use_fast_converter" in str(e.value)
# Enable multiprocessing and the fast converter; iterate over
# all style-exponent combinations, with auto-detection
for s, c in expstyles.items():
table = ascii.read(
text.format(*c),
guess=guess,
fast_reader={"parallel": parallel, "exponent_style": s},
)
assert_table_equal(table, expc, rtol=rtol, atol=atol)
# Additional corner-case checks including triple-exponents without
# any character and mixed whitespace separators
text = (
"A B\t\t C D\n1.0001+101 2.0+000\t 0.0002-099 3\n "
+ "0.42-000 \t 0.5 6.+003 0.000000000000000000000017+330"
)
table = ascii.read(
text, guess=guess, fast_reader={"parallel": parallel, "exponent_style": "A"}
)
assert_table_equal(table, expc, rtol=rtol, atol=atol)
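# Sketch, not part of the test suite: Fortran-style "D" exponents are only
# understood by the fast converter; ``exponent_style`` selects the exponent
# character to accept ("A" or "fortran" enables auto-detection).
def _example_fortran_exponent():
    from astropy.io import ascii
    text = "a b\n1 1.23D4\n2 5.67D-8"
    t = ascii.read(text, fast_reader={"exponent_style": "D"})
    return t["b"]  # parsed as float64: 1.23e4 and 5.67e-8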
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_invalid_exp(parallel, guess):
"""
Test Fortran-style exponential notation in the fast_reader with invalid
exponent-like patterns (no triple-digits) to make sure they are returned
as strings instead, as with the standard C parser.
"""
if parallel and CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
formats = {"basic": " ", "tab": "\t", "csv": ","}
header = ["S1", "F2", "S2", "F3", "S3", "F4", "F5", "S4", "I1", "F6", "F7"]
# Tested entries and expected returns, first for auto-detect,
# then for different specified exponents
# fmt: off
fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.',
'2', '4.56e-2.3', '8000', '4.2-022', '.00000145e314']
vals_e = ['1.0001+1', '.42d1', '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
vals_d = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', '.00000145e314']
vals_a = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, 4.2e-22, 1.45e308]
vals_v = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
# fmt: on
# Iterate over supported format types and separators
for f, s in formats.items():
t1 = ascii.read(
StringIO(s.join(header) + "\n" + s.join(fields)),
format=f,
guess=guess,
fast_reader={"parallel": parallel, "exponent_style": "A"},
)
assert_table_equal(t1, Table([[col] for col in vals_a], names=header))
# Non-basic separators require guessing enabled to be detected
if guess:
formats["bar"] = "|"
else:
formats = {"basic": " "}
for s in formats.values():
t2 = ascii.read(
StringIO(s.join(header) + "\n" + s.join(fields)),
guess=guess,
fast_reader={"parallel": parallel, "exponent_style": "a"},
)
assert_table_equal(t2, Table([[col] for col in vals_a], names=header))
# Iterate for (default) expchar 'E'
for s in formats.values():
t3 = ascii.read(
StringIO(s.join(header) + "\n" + s.join(fields)),
guess=guess,
fast_reader={"parallel": parallel, "use_fast_converter": True},
)
assert_table_equal(t3, Table([[col] for col in vals_e], names=header))
# Iterate for expchar 'D'
for s in formats.values():
t4 = ascii.read(
StringIO(s.join(header) + "\n" + s.join(fields)),
guess=guess,
fast_reader={"parallel": parallel, "exponent_style": "D"},
)
assert_table_equal(t4, Table([[col] for col in vals_d], names=header))
# Iterate for regular converter (strtod)
for s in formats.values():
t5 = ascii.read(
StringIO(s.join(header) + "\n" + s.join(fields)),
guess=guess,
fast_reader={"parallel": parallel, "use_fast_converter": False},
)
read_values = [col[0] for col in t5.itercols()]
if os.name == "nt":
# Apparently C strtod() on (some?) MSVC recognizes 'd' exponents!
assert read_values == vals_v or read_values == vals_e
else:
assert read_values == vals_e
def test_fortran_reader_notbasic():
"""
Check that readers without a fast option raise a ParameterError when a
fast_reader is asked for (implies the default 'guess=True').
"""
tabstr = dedent(
"""
a b
1 1.23D4
2 5.67D-8
"""
)[1:-1]
t1 = ascii.read(tabstr.split("\n"), fast_reader=dict(exponent_style="D"))
assert t1["b"].dtype.kind == "f"
tabrdb = dedent(
"""
a\tb
# A simple RDB table
N\tN
1\t 1.23D4
2\t 5.67-008
"""
)[1:-1]
t2 = ascii.read(
tabrdb.split("\n"), format="rdb", fast_reader=dict(exponent_style="fortran")
)
assert t2["b"].dtype.kind == "f"
tabrst = dedent(
"""
= =======
a b
= =======
1 1.23E4
2 5.67E-8
= =======
"""
)[1:-1]
t3 = ascii.read(tabrst.split("\n"), format="rst")
assert t3["b"].dtype.kind == "f"
t4 = ascii.read(tabrst.split("\n"), guess=True)
assert t4["b"].dtype.kind == "f"
# In the special case of fast_converter=True (the default),
# incompatibility is ignored
t5 = ascii.read(tabrst.split("\n"), format="rst", fast_reader=True)
assert t5["b"].dtype.kind == "f"
with pytest.raises(ParameterError):
ascii.read(tabrst.split("\n"), format="rst", guess=False, fast_reader="force")
with pytest.raises(ParameterError):
ascii.read(
tabrst.split("\n"),
format="rst",
guess=False,
fast_reader=dict(use_fast_converter=False),
)
tabrst = tabrst.replace("E", "D")
with pytest.raises(ParameterError):
ascii.read(
tabrst.split("\n"),
format="rst",
guess=False,
fast_reader=dict(exponent_style="D"),
)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize(
"fast_reader", [dict(exponent_style="D"), dict(exponent_style="A")]
)
def test_dict_kwarg_integrity(fast_reader, guess):
"""
Check if dictionaries passed as kwargs (fast_reader in this test) are
left intact by ascii.read()
"""
expstyle = fast_reader.get("exponent_style", "E")
fields = ["10.1D+199", "3.14d+313", "2048d+306", "0.6D-325", "-2.d345"]
ascii.read(StringIO(" ".join(fields)), guess=guess, fast_reader=fast_reader)
assert fast_reader.get("exponent_style", None) == expstyle
@pytest.mark.parametrize(
"fast_reader", [False, dict(parallel=True), dict(parallel=False)]
)
def test_read_empty_basic_table_with_comments(fast_reader):
"""
Test for reading a "basic" format table that has no data but has comments.
Tests the fix for #8267.
"""
dat = """
# comment 1
# comment 2
col1 col2
"""
t = ascii.read(dat, fast_reader=fast_reader)
assert t.meta["comments"] == ["comment 1", "comment 2"]
assert len(t) == 0
assert t.colnames == ["col1", "col2"]
@pytest.mark.parametrize(
"fast_reader", [dict(use_fast_converter=True), dict(exponent_style="A")]
)
def test_conversion_fast(fast_reader):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = ascii.read(text, fast_reader=fast_reader)
assert_equal(table["A"].dtype.kind, "f")
assert table["B"].dtype.kind in ("S", "U")
assert_equal(table["C"].dtype.kind, "i")
assert_equal(table["D"].dtype.kind, "f")
assert table["E"].dtype.kind in ("S", "U")
assert table["F"].dtype.kind in ("S", "U")
assert table["G"].dtype.kind in ("S", "U")
assert table["H"].dtype.kind in ("S", "U")
@pytest.mark.parametrize("delimiter", ["\n", "\r"])
@pytest.mark.parametrize("fast_reader", [False, True, "force"])
def test_newline_as_delimiter(delimiter, fast_reader):
"""
Check that newline characters are correctly handled as delimiters.
Tests the fix for #9928.
"""
if delimiter == "\r":
eol = "\n"
else:
eol = "\r"
inp0 = ["a | b | c ", " 1 | '2' | 3.00000 "]
inp1 = "a {0:s} b {0:s}c{1:s} 1 {0:s}'2'{0:s} 3.0".format(delimiter, eol)
inp2 = [f"a {delimiter} b{delimiter} c", f"1{delimiter} '2' {delimiter} 3.0"]
t0 = ascii.read(inp0, delimiter="|", fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
t2 = ascii.read(inp2, delimiter=delimiter, fast_reader=fast_reader)
assert t1.colnames == t2.colnames == ["a", "b", "c"]
assert len(t1) == len(t2) == 1
assert t1["b"].dtype.kind in ("S", "U")
assert t2["b"].dtype.kind in ("S", "U")
assert_table_equal(t1, t0)
assert_table_equal(t2, t0)
inp0 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format("|", eol)
inp1 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format(delimiter, eol)
t0 = ascii.read(inp0, delimiter="|", fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
if not fast_reader:
pytest.xfail("Quoted fields are not parsed correctly by BaseSplitter")
assert_equal(t1["b"].dtype.kind, "i")
@pytest.mark.parametrize("delimiter", [" ", "|", "\n", "\r"])
@pytest.mark.parametrize("fast_reader", [False, True, "force"])
def test_single_line_string(delimiter, fast_reader):
"""
String input without a newline character is interpreted as a filename,
unless it is an element of an iterable. Maybe not logical, but test that it is
at least treated consistently.
"""
expected = Table([[1], [2], [3.00]], names=("col1", "col2", "col3"))
text = "1{0:s}2{0:s}3.0".format(delimiter)
if delimiter in ("\r", "\n"):
t1 = ascii.read(
text, format="no_header", delimiter=delimiter, fast_reader=fast_reader
)
assert_table_equal(t1, expected)
else:
# Windows raises OSError, but not the other OSes.
with pytest.raises((FileNotFoundError, OSError)):
t1 = ascii.read(
text, format="no_header", delimiter=delimiter, fast_reader=fast_reader
)
t2 = ascii.read(
[text], format="no_header", delimiter=delimiter, fast_reader=fast_reader
)
assert_table_equal(t2, expected)
|
1f0bb87f615dd70c96d2e6de2d58279b18de4ecbad4a33fd7858cf7942b16b50 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``ECSV``
reader/writer.
"""
import copy
import os
import sys
from contextlib import nullcontext
from io import StringIO
import numpy as np
import pytest
import yaml
from astropy import units as u
from astropy.io import ascii
from astropy.io.ascii.ecsv import DELIMITERS, InvalidEcsvDatatypeWarning
from astropy.io.tests.mixin_columns import compare_attrs, mixin_cols, serialized_names
from astropy.table import Column, QTable, Table
from astropy.table.column import MaskedColumn
from astropy.table.table_helpers import simple_table
from astropy.units import QuantityInfo
from astropy.units import allclose as quantity_allclose
from .common import TEST_DIR
DTYPES = [
"bool",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"float128",
"str",
]
if not hasattr(np, "float128") or os.name == "nt" or sys.maxsize <= 2**32:
DTYPES.remove("float128")
T_DTYPES = Table()
for dtype in DTYPES:
if dtype == "bool":
data = np.array([False, True, False])
elif dtype == "str":
data = np.array(["ab 0", "ab, 1", "ab2"])
else:
data = np.arange(3, dtype=dtype)
c = Column(
data, unit="m / s", description="descr_" + dtype, meta={"meta " + dtype: 1}
)
T_DTYPES[dtype] = c
T_DTYPES.meta["comments"] = ["comment1", "comment2"]
# Corresponds to simple_table()
SIMPLE_LINES = [
"# %ECSV 1.0",
"# ---",
"# datatype:",
"# - {name: a, datatype: int64}",
"# - {name: b, datatype: float64}",
"# - {name: c, datatype: string}",
"# schema: astropy-2.0",
"a b c",
"1 1.0 c",
"2 2.0 d",
"3 3.0 e",
]
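# Illustrative sketch, not part of the test suite: SIMPLE_LINES is itself a
# complete ECSV document, so joining the lines and reading them back yields
# the same table that simple_table() produces.
def _example_read_simple_lines():
    from astropy.table import Table
    t = Table.read("\n".join(SIMPLE_LINES), format="ascii.ecsv")
    return t.colnames, t["a"].dtype.kind  # (["a", "b", "c"], "i")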
def test_write_simple():
"""
Write a simple table with common types. This shows the compact version
of serialization with one line per column.
"""
t = simple_table()
out = StringIO()
t.write(out, format="ascii.ecsv")
assert out.getvalue().splitlines() == SIMPLE_LINES
def test_write_full():
"""
Write a full-featured table with common types and explicitly check the output.
"""
t = T_DTYPES["bool", "int64", "float64", "str"]
lines = [
"# %ECSV 1.0",
"# ---",
"# datatype:",
"# - name: bool",
"# unit: m / s",
"# datatype: bool",
"# description: descr_bool",
"# meta: {meta bool: 1}",
"# - name: int64",
"# unit: m / s",
"# datatype: int64",
"# description: descr_int64",
"# meta: {meta int64: 1}",
"# - name: float64",
"# unit: m / s",
"# datatype: float64",
"# description: descr_float64",
"# meta: {meta float64: 1}",
"# - name: str",
"# unit: m / s",
"# datatype: string",
"# description: descr_str",
"# meta: {meta str: 1}",
"# meta: !!omap",
"# - comments: [comment1, comment2]",
"# schema: astropy-2.0",
"bool int64 float64 str",
'False 0 0.0 "ab 0"',
'True 1 1.0 "ab, 1"',
"False 2 2.0 ab2",
]
out = StringIO()
t.write(out, format="ascii.ecsv")
assert out.getvalue().splitlines() == lines
def test_write_read_roundtrip():
"""
Write a full-featured table with all types and see that it round-trips on
readback. Use both space and comma delimiters.
"""
t = T_DTYPES
for delimiter in DELIMITERS:
out = StringIO()
t.write(out, format="ascii.ecsv", delimiter=delimiter)
t2s = [
Table.read(out.getvalue(), format="ascii.ecsv"),
Table.read(out.getvalue(), format="ascii"),
ascii.read(out.getvalue()),
ascii.read(out.getvalue(), format="ecsv", guess=False),
ascii.read(out.getvalue(), format="ecsv"),
]
for t2 in t2s:
assert t.meta == t2.meta
for name in t.colnames:
assert t[name].attrs_equal(t2[name])
assert np.all(t[name] == t2[name])
def test_bad_delimiter():
"""
Passing a delimiter other than space or comma gives an exception
"""
out = StringIO()
with pytest.raises(ValueError) as err:
T_DTYPES.write(out, format="ascii.ecsv", delimiter="|")
assert "only space and comma are allowed" in str(err.value)
def test_bad_header_start():
"""
Bad header without initial # %ECSV x.x
"""
lines = copy.copy(SIMPLE_LINES)
lines[0] = "# %ECV 0.9"
with pytest.raises(ascii.InconsistentTableError):
Table.read("\n".join(lines), format="ascii.ecsv", guess=False)
def test_bad_delimiter_input():
"""
Illegal delimiter in input
"""
lines = copy.copy(SIMPLE_LINES)
lines.insert(2, "# delimiter: |")
with pytest.raises(ValueError) as err:
Table.read("\n".join(lines), format="ascii.ecsv", guess=False)
assert "only space and comma are allowed" in str(err.value)
def test_multidim_input():
"""
Multi-dimensional column in input
"""
t = Table()
t["a"] = np.arange(24).reshape(2, 3, 4)
t["a"].info.description = "description"
t["a"].info.meta = {1: 2}
t["b"] = [1, 2]
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert np.all(t2["a"] == t["a"])
assert t2["a"].shape == t["a"].shape
assert t2["a"].dtype == t["a"].dtype
assert t2["a"].info.description == t["a"].info.description
assert t2["a"].info.meta == t["a"].info.meta
assert np.all(t2["b"] == t["b"])
def test_structured_input():
"""
Structured column in input.
"""
t = Table()
# Add unit, description and meta to make sure that round-trips as well.
t["a"] = Column(
[("B", (1.0, [2.0, 3.0])), ("A", (9.0, [8.0, 7.0]))],
dtype=[("s", "U1"), ("v", [("p0", "f8"), ("p1", "2f8")])],
description="description",
format=">", # Most formats do not work with structured!
unit="m", # Overall unit should round-trip.
meta={1: 2},
)
t["b"] = Column(
[[(1.0, 2.0), (9.0, 8.0)], [(3.0, 4.0), (7.0, 6.0)]],
dtype="f8,f8",
unit=u.Unit("m,s"), # Per part unit should round-trip too.
)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
for col in t.colnames:
assert np.all(t2[col] == t[col])
assert t2[col].shape == t[col].shape
assert t2[col].dtype == t[col].dtype
assert t2[col].unit == t[col].unit
assert t2[col].format == t[col].format
assert t2[col].info.description == t[col].info.description
assert t2[col].info.meta == t[col].info.meta
def test_round_trip_empty_table():
"""Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)"""
t = Table(dtype=[bool, "i", "f"], names=["a", "b", "c"])
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t.dtype == t2.dtype
assert len(t2) == 0
def test_csv_ecsv_colnames_mismatch():
"""
Test that mismatch in column names from normal CSV header vs.
ECSV YAML header raises the expected exception.
"""
lines = copy.copy(SIMPLE_LINES)
header_index = lines.index("a b c")
lines[header_index] = "a b d"
with pytest.raises(ValueError) as err:
ascii.read(lines, format="ecsv")
assert "column names from ECSV header ['a', 'b', 'c']" in str(err.value)
def test_regression_5604():
"""
See https://github.com/astropy/astropy/issues/5604 for more.
"""
t = Table()
t.meta = {"foo": 5 * u.km, "foo2": u.s}
t["bar"] = [7] * u.km
out = StringIO()
t.write(out, format="ascii.ecsv")
assert "!astropy.units.Unit" in out.getvalue()
assert "!astropy.units.Quantity" in out.getvalue()
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
assert obj1.shape == obj2.shape
info_attrs = [
"info.name",
"info.format",
"info.unit",
"info.description",
"info.dtype",
]
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split("."):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
if isinstance(a1, np.ndarray) and a1.dtype.kind == "f":
assert quantity_allclose(a1, a2, rtol=1e-10)
else:
assert np.all(a1 == a2)
# If no attrs are given, just compare the objects directly.
if not attrs:
if isinstance(obj1, np.ndarray) and obj1.dtype.kind == "f":
assert quantity_allclose(obj1, obj2, rtol=1e-15)
else:
assert np.all(obj1 == obj2)
def test_ecsv_mixins_ascii_read_class():
"""Ensure that ascii.read(ecsv_file) returns the correct class
(QTable if any Quantity subclasses, Table otherwise).
"""
# Make a table with every mixin type except Quantities
t = QTable(
{
name: col
for name, col in mixin_cols.items()
if not isinstance(col.info, QuantityInfo)
}
)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format="ecsv")
assert type(t2) is Table
# Add a single quantity column
t["lon"] = mixin_cols["lon"]
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format="ecsv")
assert type(t2) is QTable
def test_ecsv_mixins_qtable_to_table():
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.allclose(col.value, col2, rtol=1e-10)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_ecsv_mixins_as_one(table_cls):
"""Test write/read all cols at once and validate intermediate column names"""
names = sorted(mixin_cols)
all_serialized_names = []
# ECSV stores times as value by default, so we just get the column back.
# One exception is tm3, which is set to serialize via jd1 and jd2.
for name in names:
s_names = serialized_names[name]
if not name.startswith("tm3"):
s_names = [
s_name.replace(".jd1", "")
for s_name in s_names
if not s_name.endswith("jd2")
]
all_serialized_names.extend(s_names)
t = table_cls([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format="ascii.ecsv")
assert t.colnames == t2.colnames
# Read as a ascii.basic table (skip all the ECSV junk)
t3 = table_cls.read(out.getvalue(), format="ascii.basic")
assert t3.colnames == all_serialized_names
def make_multidim(col, ndim):
"""Take a col with length=2 and make it N-d by repeating elements.
For the special case of ndim==1 just return the original.
The output has shape [3] * ndim. By using 3 we can be sure that repeating
the two input elements gives an output that is sufficiently unique for
the multidim tests.
"""
if ndim > 1:
import itertools
idxs = [idx for idx, _ in zip(itertools.cycle([0, 1]), range(3**ndim))]
col = col[idxs].reshape([3] * ndim)
return col
@pytest.mark.parametrize("name_col", list(mixin_cols.items()))
@pytest.mark.parametrize("table_cls", (Table, QTable))
@pytest.mark.parametrize("ndim", (1, 2, 3))
def test_ecsv_mixins_per_column(table_cls, name_col, ndim):
"""Test write/read one col at a time and do detailed validation.
This tests every input column type as 1-d, 2-d and 3-d.
"""
name, col = name_col
c = make_multidim(np.array([1.0, 2.0]), ndim)
col = make_multidim(col, ndim)
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "description"
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format="ascii.ecsv")
assert t.colnames == t2.colnames
for colname in t.colnames:
assert len(t2[colname].shape) == ndim
if colname in ("c1", "c2"):
compare = ["data"]
else:
# Storing Longitude as Column loses wrap_angle.
compare = [
attr
for attr in compare_attrs[colname]
if not (attr == "wrap_angle" and table_cls is Table)
]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
def test_round_trip_masked_table_default(tmp_path):
"""Test (mostly) round-trip of MaskedColumn through ECSV using default serialization
that uses an empty string "" to mark NULL values. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = tmp_path / "test.ecsv"
t = simple_table(masked=True) # int, float, and str cols with one masked element
t.write(filename)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
# From a formal perspective the round-trip columns are the "same"
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# But peeking under the mask shows that the underlying data are changed
# because by default ECSV uses "" to represent masked elements.
t[name].mask = False
t2[name].mask = False
assert not np.all(t2[name] == t[name]) # Expected diff
def test_round_trip_masked_table_serialize_mask(tmp_path):
"""
Same as prev but set the serialize_method to 'data_mask' so mask is written out
"""
filename = tmp_path / "test.ecsv"
t = simple_table(masked=True) # int, float, and str cols with one masked element
t["c"][0] = "" # This would come back as masked for default "" NULL marker
# MaskedColumn with no masked elements. See the MaskedColumnInfo class
# _represent_as_dict() method for how a column with no masked elements is tested.
t["d"] = [1, 2, 3]
t.write(filename, serialize_method="data_mask")
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
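# Hedged sketch, not part of the test suite: with serialize_method="data_mask"
# the mask is written alongside the data, so values hidden under the mask
# survive the ECSV round-trip instead of being replaced by "".
def _example_serialize_mask():
    from io import StringIO
    import numpy as np
    from astropy.table import Table
    from astropy.table.column import MaskedColumn
    t = Table({"a": MaskedColumn([1, 2, 3], mask=[False, True, False])})
    out = StringIO()
    t.write(out, format="ascii.ecsv", serialize_method="data_mask")
    t2 = Table.read(out.getvalue(), format="ascii.ecsv")
    t2["a"].mask = False
    return np.all(t2["a"] == [1, 2, 3])  # data under the mask preserved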
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_ecsv_round_trip_user_defined_unit(table_cls, tmp_path):
"""Ensure that we can read-back enabled user-defined units."""
# Test adapted from #8897, where it was noted that this works
# but was not tested.
filename = tmp_path / "test.ecsv"
unit = u.def_unit("bandpass_sol_lum")
t = table_cls()
t["l"] = np.arange(5) * unit
t.write(filename)
# without the unit enabled, get UnrecognizedUnit
if table_cls is QTable:
ctx = pytest.warns(u.UnitsWarning, match=r"'bandpass_sol_lum' did not parse .*")
else:
ctx = nullcontext()
# Note: The read might also generate ResourceWarning, in addition to UnitsWarning
with ctx:
t2 = table_cls.read(filename)
assert isinstance(t2["l"].unit, u.UnrecognizedUnit)
assert str(t2["l"].unit) == "bandpass_sol_lum"
if table_cls is QTable:
assert np.all(t2["l"].value == t["l"].value)
else:
assert np.all(t2["l"] == t["l"])
# But with it enabled, it works.
with u.add_enabled_units(unit):
t3 = table_cls.read(filename)
assert t3["l"].unit is unit
assert np.all(t3["l"] == t["l"])
# Just to be sure, also try writing with unit enabled.
filename2 = tmp_path / "test2.ecsv"
t3.write(filename2)
t4 = table_cls.read(filename)
assert t4["l"].unit is unit
assert np.all(t4["l"] == t["l"])
def test_read_masked_bool():
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: col0, datatype: bool}
# schema: astropy-2.0
col0
1
0
True
""
False
"""
dat = ascii.read(txt, format="ecsv")
col = dat["col0"]
assert isinstance(col, MaskedColumn)
assert np.all(col.mask == [False, False, False, True, False])
assert np.all(col == [True, False, True, False, False])
@pytest.mark.parametrize("serialize_method", ["null_value", "data_mask"])
@pytest.mark.parametrize("dtype", [np.int64, np.float64, bool, str])
@pytest.mark.parametrize("delimiter", [",", " "])
def test_roundtrip_multidim_masked_array(serialize_method, dtype, delimiter):
# TODO also test empty string with null value
t = Table()
col = MaskedColumn(np.arange(12).reshape(2, 3, 2), dtype=dtype)
if dtype is str:
# Converting int64 values to str gives a dtype of U21, so cast down to U2.
col = col.astype("U2")
col.mask[0, 0, 0] = True
col.mask[1, 1, 1] = True
t["a"] = col
t["b"] = ["x", "y"] # Add another column for kicks
out = StringIO()
t.write(out, format="ascii.ecsv", serialize_method=serialize_method)
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
if hasattr(t[name], "mask"):
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize("subtype", ["some-user-type", "complex"])
def test_multidim_unknown_subtype(subtype):
"""Test an ECSV file with a string type but unknown subtype"""
txt = f"""\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: {subtype}
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.warns(
InvalidEcsvDatatypeWarning,
match=rf"unexpected subtype '{subtype}' set for column 'a'",
):
t = ascii.read(txt, format="ecsv")
assert t["a"].dtype.kind == "U"
assert t["a"][0] == "[1,2]"
def test_multidim_bad_shape():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: int64[3]
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.raises(
ValueError, match="column 'a' failed to convert: shape mismatch"
):
Table.read(txt, format="ascii.ecsv")
def test_write_not_json_serializable():
t = Table()
t["a"] = np.array([{1, 2}, 1], dtype=object)
match = (
"could not convert column 'a' to string: Object of type set is not JSON"
" serializable"
)
out = StringIO()
with pytest.raises(TypeError, match=match):
t.write(out, format="ascii.ecsv")
def test_read_not_json_serializable():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: string, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: column value is not valid JSON"
with pytest.raises(ValueError, match=match):
Table.read(txt, format="ascii.ecsv")
def test_read_bad_datatype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: object}
# schema: astropy-2.0
a
fail
[3,4]"""
with pytest.warns(
InvalidEcsvDatatypeWarning,
match="unexpected datatype 'object' of column 'a' is not in allowed",
):
t = Table.read(txt, format="ascii.ecsv")
assert t["a"][0] == "fail"
assert type(t["a"][1]) is str
assert type(t["a"].dtype) == np.dtype("O")
def test_read_complex():
"""Test an ECSV v1.0 file with a complex column"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: complex}
# schema: astropy-2.0
a
1+1j
2+2j"""
with pytest.warns(
InvalidEcsvDatatypeWarning,
match="unexpected datatype 'complex' of column 'a' is not in allowed",
):
t = Table.read(txt, format="ascii.ecsv")
assert t["a"].dtype.type is np.complex128
def test_read_str():
"""Test an ECSV file with a 'str' instead of 'string' datatype"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: str}
# schema: astropy-2.0
a
sometext
S""" # also testing single character text
with pytest.warns(
InvalidEcsvDatatypeWarning,
match="unexpected datatype 'str' of column 'a' is not in allowed",
):
t = Table.read(txt, format="ascii.ecsv")
assert isinstance(t["a"][1], str)
assert isinstance(t["a"][0], np.str_)
def test_read_bad_datatype_for_object_subtype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: int64, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: datatype of column 'a' must be \"string\""
with pytest.raises(ValueError, match=match):
Table.read(txt, format="ascii.ecsv")
def test_full_repr_roundtrip():
"""Test round-trip of float values to full precision even with format
specified"""
t = Table()
t["a"] = np.array([np.pi, 1 / 7], dtype=np.float64)
t["a"].info.format = ".2f"
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert np.all(t["a"] == t2["a"])
assert t2["a"].info.format == ".2f"
#############################################################################
# Define a number of specialized columns for testing and the expected values
# of `datatype` for each column.
#############################################################################
# First here is some helper code used to make the expected outputs code.
def _get_ecsv_header_dict(text):
lines = [line.strip() for line in text.splitlines()]
lines = [line[2:] for line in lines if line.startswith("#")]
lines = lines[2:] # Get rid of the header
out = yaml.safe_load("\n".join(lines))
return out
def _make_expected_values(cols):
from pprint import pformat
for name, col in cols.items():
t = Table()
t[name] = col
out = StringIO()
t.write(out, format="ascii.ecsv")
hdr = _get_ecsv_header_dict(out.getvalue())
fmt_hdr = pformat(hdr["datatype"])
print(f"exps[{name!r}] =", fmt_hdr[:1])
print(fmt_hdr[1:])
print()
# Expected values of `datatype` for each column
exps = {}
cols = {}
# Run of the mill scalar for completeness
cols["scalar"] = np.array([1, 2], dtype=np.int16)
exps["scalar"] = [{"datatype": "int16", "name": "scalar"}]
# Array of lists that works as a 2-d variable array. This is just treated
# as an object.
cols["2-d variable array lists"] = c = np.empty(shape=(2,), dtype=object)
c[0] = [[1, 2], ["a", 4]]
c[1] = [[1, 2, 3], [4, 5.25, 6]]
exps["2-d variable array lists"] = [
{"datatype": "string", "name": "2-d variable array lists", "subtype": "json"}
]
# Array of numpy arrays that is a 2-d variable array
cols["2-d variable array numpy"] = c = np.empty(shape=(2,), dtype=object)
c[0] = np.array([[1, 2], [3, 4]], dtype=np.float32)
c[1] = np.array([[1, 2, 3], [4, 5.5, 6]], dtype=np.float32)
exps["2-d variable array numpy"] = [
{
"datatype": "string",
"name": "2-d variable array numpy",
"subtype": "float32[2,null]",
}
]
cols["1-d variable array lists"] = np.array([[1, 2], [3, 4, 5]], dtype=object)
exps["1-d variable array lists"] = [
{"datatype": "string", "name": "1-d variable array lists", "subtype": "json"}
]
# Variable-length array
cols["1-d variable array numpy"] = np.array(
[np.array([1, 2], dtype=np.uint8), np.array([3, 4, 5], dtype=np.uint8)],
dtype=object,
)
exps["1-d variable array numpy"] = [
{"datatype": "string", "name": "1-d variable array numpy", "subtype": "uint8[null]"}
]
cols["1-d variable array numpy str"] = np.array(
[np.array(["a", "b"]), np.array(["c", "d", "e"])], dtype=object
)
exps["1-d variable array numpy str"] = [
{
"datatype": "string",
"name": "1-d variable array numpy str",
"subtype": "string[null]",
}
]
cols["1-d variable array numpy bool"] = np.array(
[np.array([True, False]), np.array([True, False, True])], dtype=object
)
exps["1-d variable array numpy bool"] = [
{
"datatype": "string",
"name": "1-d variable array numpy bool",
"subtype": "bool[null]",
}
]
cols["1-d regular array"] = np.array([[1, 2], [3, 4]], dtype=np.int8)
exps["1-d regular array"] = [
{"datatype": "string", "name": "1-d regular array", "subtype": "int8[2]"}
]
cols["2-d regular array"] = np.arange(8, dtype=np.float16).reshape(2, 2, 2)
exps["2-d regular array"] = [
{"datatype": "string", "name": "2-d regular array", "subtype": "float16[2,2]"}
]
cols["scalar object"] = np.array([{"a": 1}, {"b": 2}], dtype=object)
exps["scalar object"] = [
{"datatype": "string", "name": "scalar object", "subtype": "json"}
]
cols["1-d object"] = np.array(
[[{"a": 1}, {"b": 2}], [{"a": 1}, {"b": 2}]], dtype=object
)
exps["1-d object"] = [
{"datatype": "string", "name": "1-d object", "subtype": "json[2]"}
]
@pytest.mark.parametrize("name,col,exp", list(zip(cols, cols.values(), exps.values())))
def test_specialized_columns(name, col, exp):
"""Test variable length lists, multidim columns, object columns."""
t = Table()
t[name] = col
out = StringIO()
t.write(out, format="ascii.ecsv")
hdr = _get_ecsv_header_dict(out.getvalue())
assert hdr["datatype"] == exp
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
assert np.all(val1 == val2)
def test_full_subtypes():
"""Read ECSV file created by M. Taylor that includes scalar, fixed array,
variable array for all datatypes. This file has missing values for all
columns as both per-value null and blank entries for the entire column
value.
Note: original file was modified to include blank values in f_float and
f_double columns.
"""
t = Table.read(os.path.join(TEST_DIR, "data", "subtypes.ecsv"))
colnames = (
"i_index,"
"s_byte,s_short,s_int,s_long,s_float,s_double,s_string,s_boolean,"
"f_byte,f_short,f_int,f_long,f_float,f_double,f_string,f_boolean,"
"v_byte,v_short,v_int,v_long,v_float,v_double,v_string,v_boolean,"
"m_int,m_double"
).split(",")
assert t.colnames == colnames
type_map = {
"byte": "int8",
"short": "int16",
"int": "int32",
"long": "int64",
"float": "float32",
"double": "float64",
"string": "str",
"boolean": "bool",
}
for col in t.itercols():
info = col.info
if info.name == "i_index":
continue
assert isinstance(col, MaskedColumn)
type_name = info.name[2:] # short, int, etc
subtype = info.name[:1]
if subtype == "s": # Scalar
assert col.shape == (16,)
if subtype == "f": # Fixed array
assert col.shape == (16, 3)
if subtype == "v": # Variable array
assert col.shape == (16,)
assert info.dtype.name == "object"
for val in col:
assert isinstance(val, np.ndarray)
assert val.dtype.name.startswith(type_map[type_name])
assert len(val) in [0, 1, 2, 3]
else:
assert info.dtype.name.startswith(type_map[type_name])
def test_masked_empty_subtypes():
"""Test blank field in subtypes. Similar to previous test but with explicit
checks of values"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: o, datatype: string, subtype: json}
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
o f v
null [0,1] [1]
"" "" ""
[1,2] [2,3] [2,3]
"""
t = Table.read(txt, format="ascii.ecsv")
assert np.all(t["o"] == np.array([None, -1, [1, 2]], dtype=object))
assert np.all(t["o"].mask == [False, True, False])
exp = np.ma.array([[0, 1], [-1, -1], [2, 3]], mask=[[0, 0], [1, 1], [0, 0]])
assert np.all(t["f"] == exp)
assert np.all(t["f"].mask == exp.mask)
assert np.all(t["v"][0] == [1])
assert np.all(t["v"][2] == [2, 3])
assert np.all(t["v"].mask == [False, True, False])
def test_masked_vals_in_array_subtypes():
"""Test null values in fixed and variable array subtypes."""
t = Table()
t["f"] = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]], dtype=np.int64)
t["v"] = np.empty(2, dtype=object)
t["v"][0] = np.ma.array([1, 2], mask=[0, 1], dtype=np.int64)
t["v"][1] = np.ma.array([3, 4, 5], mask=[1, 0, 0], dtype=np.int64)
out = StringIO()
t.write(out, format="ascii.ecsv")
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
f v
[1,null] [1,null]
[null,4] [null,4,5]
"""
hdr = _get_ecsv_header_dict(out.getvalue())
hdr_exp = _get_ecsv_header_dict(txt)
assert hdr == hdr_exp
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
assert type(t2[name]) is type(t[name]) # noqa: E721
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
if isinstance(val1, np.ma.MaskedArray):
assert np.all(val1.mask == val2.mask)
assert np.all(val1 == val2)
def test_guess_ecsv_with_one_column():
"""Except for ECSV, guessing always requires at least 2 columns"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: col, datatype: string, description: hello}
# schema: astropy-2.0
col
1
2
"""
t = ascii.read(txt)
assert t["col"].dtype.kind == "U" # would be int with basic format
assert t["col"].description == "hello"
|
320bfb64e24a55d504ee83c9d00ae953852b20e449588302c20106008179e5ed | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import pathlib
from contextlib import nullcontext
from io import StringIO
from itertools import chain
import numpy as np
import pytest
from astropy import table
from astropy import units as u
from astropy.io import ascii
from astropy.table.table_helpers import simple_table
from astropy.utils.compat.optional_deps import HAS_BS4
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .common import setup_function, teardown_function # noqa: F401
test_defs = [
dict(
kwargs=dict(),
out="""\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
),
dict(
kwargs=dict(delimiter=None),
out="""\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
),
dict(
kwargs=dict(
formats={"XCENTER": "%12.1f", "YCENTER": "{0:.1f}"},
include_names=["XCENTER", "YCENTER"],
strip_whitespace=False,
),
out="""\
XCENTER YCENTER
" 138.5" 256.4
" 18.1" 280.2
""",
),
dict(
kwargs=dict(Writer=ascii.Rdb, exclude_names=["CHI"]),
out="""\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tPIER\tPERROR
N\tN\tN\tN\tN\tN\tN\tN\tN\tS
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t0\tNo_error
""",
),
dict(
kwargs=dict(Writer=ascii.Tab),
out="""\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tCHI\tPIER\tPERROR
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0.802\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t1.104\t0\tNo_error
""",
),
dict(
kwargs=dict(Writer=ascii.Csv),
out="""\
ID,XCENTER,YCENTER,MAG,MERR,MSKY,NITER,SHARPNESS,CHI,PIER,PERROR
14,138.538,256.405,15.461,0.003,34.85955,4,-0.032,0.802,0,No_error
18,18.114,280.170,22.329,0.206,30.12784,4,-2.544,1.104,0,No_error
""",
),
dict(
kwargs=dict(Writer=ascii.NoHeader),
out="""\
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
),
dict(
kwargs=dict(Writer=ascii.CommentedHeader),
out="""\
# ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
),
dict(
kwargs=dict(Writer=ascii.CommentedHeader, comment="&"),
out="""\
&ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
),
dict(
kwargs=dict(Writer=ascii.Latex),
out="""\
\\begin{table}
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
\\end{table}
""",
),
dict(
kwargs=dict(Writer=ascii.AASTex),
out="""\
\\begin{deluxetable}{ccccccccccc}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{pixels} & \\colhead{pixels} & \\colhead{magnitudes} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable}
""", # noqa: E501
),
dict(
kwargs=dict(
Writer=ascii.AASTex,
caption="Mag values \\label{tab1}",
latexdict={
"units": {"MAG": "[mag]", "XCENTER": "[pixel]"},
"tabletype": "deluxetable*",
"tablealign": "htpb",
},
),
out="""\
\\begin{deluxetable*}{ccccccccccc}[htpb]
\\tablecaption{Mag values \\label{tab1}}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{[pixel]} & \\colhead{pixels} & \\colhead{[mag]} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable*}
""", # noqa: E501
),
dict(
kwargs=dict(
Writer=ascii.Latex,
caption="Mag values \\label{tab1}",
latexdict={
"preamble": "\\begin{center}",
"tablefoot": "\\end{center}",
"data_end": ["\\hline", "\\hline"],
"units": {"MAG": "[mag]", "XCENTER": "[pixel]"},
"tabletype": "table*",
"tablealign": "h",
},
col_align="|lcccccccccc|",
),
out="""\
\\begin{table*}[h]
\\begin{center}
\\caption{Mag values \\label{tab1}}
\\begin{tabular}{|lcccccccccc|}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& [pixel] & pixels & [mag] & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\hline
\\hline
\\end{tabular}
\\end{center}
\\end{table*}
""",
),
dict(
kwargs=dict(Writer=ascii.Latex, latexdict=ascii.latexdicts["template"]),
out="""\
\\begin{tabletype}[tablealign]
preamble
\\caption{caption}
\\begin{tabular}{col_align}
header_start
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
header_end
data_start
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
data_end
\\end{tabular}
tablefoot
\\end{tabletype}
""",
),
dict(
kwargs=dict(Writer=ascii.Latex, latexdict={"tabletype": None}),
out="""\
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
""",
),
dict(
kwargs=dict(
Writer=ascii.HTML, htmldict={"css": "table,th,td{border:1px solid black;"}
),
out="""\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
<style>
table,th,td{border:1px solid black; </style>
</head>
<body>
<table>
<thead>
<tr>
<th>ID</th>
<th>XCENTER</th>
<th>YCENTER</th>
<th>MAG</th>
<th>MERR</th>
<th>MSKY</th>
<th>NITER</th>
<th>SHARPNESS</th>
<th>CHI</th>
<th>PIER</th>
<th>PERROR</th>
</tr>
</thead>
<tr>
<td>14</td>
<td>138.538</td>
<td>256.405</td>
<td>15.461</td>
<td>0.003</td>
<td>34.85955</td>
<td>4</td>
<td>-0.032</td>
<td>0.802</td>
<td>0</td>
<td>No_error</td>
</tr>
<tr>
<td>18</td>
<td>18.114</td>
<td>280.170</td>
<td>22.329</td>
<td>0.206</td>
<td>30.12784</td>
<td>4</td>
<td>-2.544</td>
<td>1.104</td>
<td>0</td>
<td>No_error</td>
</tr>
</table>
</body>
</html>
""",
),
dict(
kwargs=dict(Writer=ascii.Ipac),
out="""\
\\MERGERAD='INDEF'
\\IRAF='NOAO/IRAFV2.10EXPORT'
\\USER=''
\\HOST='tucana'
\\DATE='05-28-93'
\\TIME='14:46:13'
\\PACKAGE='daophot'
\\TASK='nstar'
\\IMAGE='test'
\\GRPFILE='test.psg.1'
\\PSFIMAGE='test.psf.1'
\\NSTARFILE='test.nst.1'
\\REJFILE='"hello world"'
\\SCALE='1.'
\\DATAMIN='50.'
\\DATAMAX='24500.'
\\GAIN='1.'
\\READNOISE='0.'
\\OTIME='00:07:59.0'
\\XAIRMASS='1.238106'
\\IFILTER='V'
\\RECENTER='yes'
\\FITSKY='no'
\\PSFMAG='16.594'
\\PSFRAD='5.'
\\FITRAD='3.'
\\MAXITER='50'
\\MAXGROUP='60'
\\FLATERROR='0.75'
\\PROFERROR='5.'
\\CLIPEXP='6'
\\CLIPRANGE='2.5'
| ID| XCENTER| YCENTER| MAG| MERR| MSKY| NITER| SHARPNESS| CHI| PIER| PERROR|
| long| double| double| double| double| double| long| double| double| long| char|
| | pixels| pixels| magnitudes| magnitudes| counts| | | | | perrors|
| null| null| null| null| null| null| null| null| null| null| null|
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""", # noqa: E501
),
]
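# Expected output for an IPAC table that has header metadata but no data rows
# (read from data/no_data_ipac.dat in the tests below).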
test_defs_no_data = [
dict(
kwargs=dict(Writer=ascii.Ipac),
out="""\
\\ This is an example of a valid comment.
\\ The 2nd data line is used to verify the exact column parsing
\\ (unclear if this is a valid for the IPAC format)
\\catalog='sao'
\\date='Wed Sp 20 09:48:36 1995'
\\mykeyword='Another way for defining keyvalue string'
| ra| dec| sai| v2|sptype|
|double|double|long|double| char|
| unit| unit|unit| unit| ergs|
| null| null|null| null| null|
""",
),
]
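# A minimal three-column table used to exercise the fill_values machinery; each
# case below maps fill_values-related keyword arguments to the expected output.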
tab_to_fill = ["a b c", "1 2 3", "1 1 3"]
test_defs_fill_value = [
dict(
kwargs=dict(),
out="""\
a b c
1 2 3
1 1 3
""",
),
dict(
kwargs=dict(fill_values=("1", "w")),
out="""\
a b c
w 2 3
w w 3
""",
),
dict(
kwargs=dict(fill_values=("1", "w", "b")),
out="""\
a b c
1 2 3
1 w 3
""",
),
dict(
kwargs=dict(fill_values=("1", "w"), fill_include_names=["b"]),
out="""\
a b c
1 2 3
1 w 3
""",
),
dict(
kwargs=dict(fill_values=("1", "w"), fill_exclude_names=["a"]),
out="""\
a b c
1 2 3
1 w 3
""",
),
dict(
kwargs=dict(
fill_values=("1", "w"),
fill_include_names=["a"],
fill_exclude_names=["a", "b"],
),
out="""\
a b c
1 2 3
1 1 3
""",
),
dict(
kwargs=dict(fill_values=[("1", "w")], formats={"a": "%4.2f"}),
out="""\
a b c
1.00 2 3
1.00 w 3
""",
),
]
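# Same table as above, but written as a masked table (see
# test_write_fill_masked_different) so that ascii.masked substitutions can be checked.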
test_def_masked_fill_value = [
dict(
kwargs=dict(),
out="""\
a b c
"" 2 3
1 1 ""
""",
),
dict(
kwargs=dict(fill_values=[("1", "w"), (ascii.masked, "X")]),
out="""\
a b c
X 2 3
w w X
""",
),
dict(
kwargs=dict(
fill_values=[("1", "w"), (ascii.masked, "XXX")], formats={"a": "%4.1f"}
),
out="""\
a b c
XXX 2 3
1.0 w XXX
""",
),
dict(
kwargs=dict(Writer=ascii.Csv),
out="""\
a,b,c
,2,3
1,1,
""",
),
]
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
def check_write_table(test_def, table, fast_writer, out=None):
if out is None:
out = StringIO()
try:
ascii.write(table, out, fast_writer=fast_writer, **test_def["kwargs"])
except ValueError as e: # if format doesn't have a fast writer, ignore
if "not in the list of formats with fast writers" not in str(e.value):
raise e
return
if isinstance(out, StringIO):
# Output went to a buffer
actual = out.getvalue()
else:
# Output went to a file
if str(out).startswith("~"):
# Ensure a file hasn't been accidentally written to a literal tilde
# path
assert not os.path.exists(out)
out = os.path.expanduser(out)
assert os.path.exists(out)
with open(out) as f:
actual = f.read()
os.remove(out)
print(f"Expected:\n{test_def['out']}")
print(f"Actual:\n{actual}")
assert [x.strip() for x in actual.strip().splitlines()] == [
x.strip() for x in test_def["out"].strip().splitlines()
]
def check_write_table_via_table(test_def, table, fast_writer, out=None):
if out is None:
out = StringIO()
test_def = copy.deepcopy(test_def)
if "Writer" in test_def["kwargs"]:
format = f"ascii.{test_def['kwargs']['Writer']._format_name}"
del test_def["kwargs"]["Writer"]
else:
format = "ascii"
try:
table.write(out, format=format, fast_writer=fast_writer, **test_def["kwargs"])
except ValueError as e: # if format doesn't have a fast writer, ignore
if "not in the list of formats with fast writers" not in str(e.value):
raise e
return
if isinstance(out, StringIO):
# Output went to a buffer
actual = out.getvalue()
else:
# Output went to a file
if str(out).startswith("~"):
# Ensure a file hasn't been accidentally written to a literal tilde
# path
assert not os.path.exists(out)
out = os.path.expanduser(out)
assert os.path.exists(out)
with open(out) as f:
actual = f.read()
os.remove(out)
print(f"Expected:\n{test_def['out']}")
print(f"Actual:\n{actual}")
assert [x.strip() for x in actual.strip().splitlines()] == [
x.strip() for x in test_def["out"].strip().splitlines()
]
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize(
"path_format", ["buffer", "plain", "tilde-str", "tilde-pathlib"]
)
def test_write_table(fast_writer, tmp_path, home_is_tmpdir, path_format):
table = ascii.get_reader(Reader=ascii.Daophot)
data = table.read("data/daophot.dat")
if path_format == "buffer":
out_name = None
elif path_format == "plain":
out_name = tmp_path / "table"
elif path_format == "tilde-str":
out_name = os.path.join("~", "table")
else:
out_name = pathlib.Path("~", "table")
for test_def in test_defs:
check_write_table(test_def, data, fast_writer, out=out_name)
check_write_table_via_table(test_def, data, fast_writer, out=out_name)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_values(fast_writer):
data = ascii.read(tab_to_fill)
for test_def in test_defs_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_masked_different(fast_writer):
"""see discussion in #2255"""
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data["a"].mask = [True, False]
data["c"].mask = [False, True]
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_no_data_ipac(fast_writer):
"""Write an IPAC table that contains no data."""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
for test_def in test_defs_no_data:
check_write_table(test_def, data, fast_writer)
check_write_table_via_table(test_def, data, fast_writer)
def test_write_invalid_toplevel_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored in the top-level metadata and therefore should
raise a warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
data.meta["blah"] = "extra"
out = StringIO()
with pytest.warns(AstropyWarning, match=r".*were not written.*") as warn:
data.write(out, format="ascii.ipac")
assert len(warn) == 1
def test_write_invalid_keyword_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored appropriately in the ``keywords`` section
of the metadata but with invalid format and therefore should raise a
warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
data.meta["keywords"]["blah"] = "invalid"
out = StringIO()
with pytest.warns(AstropyWarning, match=r".*has been skipped.*") as warn:
data.write(out, format="ascii.ipac")
assert len(warn) == 1
def test_write_valid_meta_ipac():
"""Write an IPAC table that contains no data and has *correctly* specified
metadata. No warnings should be issued"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
data.meta["keywords"]["blah"] = {"value": "invalid"}
out = StringIO()
data.write(out, format="ascii.ipac")
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_comments(fast_writer):
"""Write comments in output originally read by io.ascii."""
data = ascii.read("#c1\n # c2\t\na,b,c\n# c3\n1,2,3")
out = StringIO()
ascii.write(data, out, format="basic", fast_writer=fast_writer)
expected = ["# c1", "# c2", "# c3", "a b c", "1 2 3"]
assert out.getvalue().splitlines() == expected
# header comes before comments for commented-header
out = StringIO()
ascii.write(data, out, format="commented_header", fast_writer=fast_writer)
expected = ["# a b c", "# c1", "# c2", "# c3", "1 2 3"]
assert out.getvalue().splitlines() == expected
# setting comment=False should disable comment writing
out = StringIO()
ascii.write(data, out, format="basic", comment=False, fast_writer=fast_writer)
expected = ["a b c", "1 2 3"]
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize("fmt", ["%0.1f", ".1f", "0.1f", "{0:0.1f}"])
def test_write_format(fast_writer, fmt):
"""Check different formats for a column."""
data = ascii.read("#c1\n # c2\t\na,b,c\n# c3\n1.11,2.22,3.33")
out = StringIO()
expected = ["# c1", "# c2", "# c3", "a b c", "1.1 2.22 3.33"]
data["a"].format = fmt
ascii.write(data, out, format="basic", fast_writer=fast_writer)
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
def test_strip_names(fast_writer):
"""Names should be stripped of whitespace by default."""
data = table.Table([[1], [2], [3]], names=(" A", "B ", " C "))
out = StringIO()
ascii.write(data, out, format="csv", fast_writer=fast_writer)
assert out.getvalue().splitlines()[0] == "A,B,C"
def test_latex_units():
"""
Check to make sure that Latex and AASTex writers attempt to fall
back on the **unit** attribute of **Column** if the supplied
**latexdict** does not specify units.
"""
t = table.Table(
[
table.Column(name="date", data=["a", "b"]),
table.Column(name="NUV exp.time", data=[1, 2]),
]
)
latexdict = copy.deepcopy(ascii.latexdicts["AA"])
latexdict["units"] = {"NUV exp.time": "s"}
out = StringIO()
expected = """\
\\begin{table}{cc}
\\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}}
\\startdata
a & 1 \\\\
b & 2
\\enddata
\\end{table}
""".replace(
"\n", os.linesep
)
ascii.write(t, out, format="aastex", latexdict=latexdict)
assert out.getvalue() == expected
# use unit attribute instead
t["NUV exp.time"].unit = u.s
t["date"].unit = u.yr
out = StringIO()
ascii.write(t, out, format="aastex", latexdict=ascii.latexdicts["AA"])
assert out.getvalue() == expected.replace(
"colhead{s}", r"colhead{$\mathrm{s}$}"
).replace("colhead{ }", r"colhead{$\mathrm{yr}$}")
@pytest.mark.parametrize("fast_writer", [True, False])
def test_commented_header_comments(fast_writer):
"""
Test the fix for #3562: using comment=False with the commented_header
writer should raise a clear ValueError rather than a confusing exception.
"""
t = table.Table([[1, 2]])
with pytest.raises(ValueError) as err:
out = StringIO()
ascii.write(
t, out, format="commented_header", comment=False, fast_writer=fast_writer
)
assert "for the commented_header writer you must supply a string" in str(err.value)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_byte_string_output(fast_writer):
"""
Test the fix for #4350 where byte strings were output with a
leading `b` on Py3.
"""
t = table.Table([["Hello", "World"]], dtype=["S10"])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["col0", "Hello", "World"]
@pytest.mark.parametrize(
"names, include_names, exclude_names, formats, issues_warning",
[
(["x", "y"], ["x", "y"], ["x"], {"x": "%d", "y": "%f"}, True),
(["x", "y"], ["x", "y"], ["y"], {"x": "%d"}, False),
(["x", "y"], ["x", "y"], [], {"p": "%d", "q": "%f"}, True),
(["x", "y"], ["x", "y"], [], {"z": "%f"}, True),
(["x", "y"], ["x", "y"], [], {"x": "%d"}, False),
(["x", "y"], ["x", "y"], [], {"p": "%d", "y": "%f"}, True),
(["x", "y"], ["x", "y"], [], {}, False),
],
)
def test_names_with_formats(
names, include_names, exclude_names, formats, issues_warning
):
"""Test for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(
t,
out,
names=names,
include_names=include_names,
exclude_names=exclude_names,
formats=formats,
)
if issues_warning:
assert len(warn) == 1
@pytest.mark.parametrize(
"formats, issues_warning",
[
({"p": "%d", "y": "%f"}, True),
({"x": "%d", "y": "%f"}, True),
({"z": "%f"}, True),
({}, False),
],
)
def test_columns_names_with_formats(formats, issues_warning):
"""Test the fix for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(t, out, formats=formats)
if issues_warning:
assert len(warn) == 1
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_quoted_empty_field(fast_writer):
"""
Test that empty string fields are quoted as needed: with the default space
delimiter they are written as "", while with a comma delimiter they are
left empty.
"""
t = table.Table([["Hello", ""], ["", ""]], dtype=["S10", "S10"])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["col0 col1", 'Hello ""', '"" ""']
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer, delimiter=",")
assert out.getvalue().splitlines() == ["col0,col1", "Hello,", ","]
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_empty_table(fast_writer):
"""Test writing empty table #8275."""
t = table.Table([[]], dtype=["S2"])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["col0"]
@pytest.mark.parametrize(
"format", ["ascii", "csv", "html", "latex", "ascii.fixed_width", "html"]
)
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize("path_format", ["plain", "tilde-str", "tilde-pathlib"])
def test_write_overwrite_ascii(
format, fast_writer, tmp_path, home_is_tmpdir, path_format
):
"""Test overwrite argument for various ASCII writers"""
true_filename = tmp_path / "table-tmp.dat"
if path_format == "plain":
filename = true_filename
elif path_format == "tilde-str":
filename = os.path.join("~", "table-tmp.dat")
else:
filename = pathlib.Path("~", "table-tmp.dat")
with open(true_filename, "w"):
# create empty file
pass
t = table.Table([["Hello", ""], ["", ""]], dtype=["S10", "S10"])
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format=format, fast_writer=fast_writer)
t.write(filename, overwrite=True, format=format, fast_writer=fast_writer)
# If the output is a file object, overwrite is ignored
with open(true_filename, "w") as fp:
t.write(fp, overwrite=False, format=format, fast_writer=fast_writer)
t.write(fp, overwrite=True, format=format, fast_writer=fast_writer)
if "tilde" in path_format:
# Ensure no files have been accidentally written to a literal tilde path
assert not os.path.exists(filename)
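# All registered ascii formats (both the fast C-based and the pure-Python
# implementations), used to parametrize the round-trip tests below.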
fmt_name_classes = list(
chain(ascii.core.FAST_CLASSES.items(), ascii.core.FORMAT_CLASSES.items())
)
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_roundtrip_masked(fmt_name_class):
"""
Round trip a simple masked table through every writable format and confirm
that reading back gives the same result.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, "_io_registry_can_write", True):
return
# Skip tests for fixed_width or HTML without bs4
if (fmt_name == "html" and not HAS_BS4) or fmt_name == "fixed_width":
return
if "qdp" in fmt_name:
# QDP tables are for numeric values only
t = simple_table(masked=True, kinds=["f", "i"])
else:
t = simple_table(masked=True)
out = StringIO()
fast = fmt_name in ascii.core.FAST_CLASSES
try:
ascii.write(t, out, format=fmt_name, fast_writer=fast)
except ImportError: # Some failed dependency, skip test
return
# No-header formats need to be told the column names
kwargs = {"names": t.colnames} if "no_header" in fmt_name else {}
if "qdp" in fmt_name:
kwargs.update({"table_id": 0, "names": t.colnames})
t2 = ascii.read(
out.getvalue(), format=fmt_name, fast_reader=fast, guess=False, **kwargs
)
assert t.colnames == t2.colnames
for col, col2 in zip(t.itercols(), t2.itercols()):
assert col.dtype.kind == col2.dtype.kind
assert np.all(col == col2)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_newlines(fast_writer, tmp_path):
# Regression test for https://github.com/astropy/astropy/issues/5126
# On windows, when writing to a filename (not e.g. StringIO), newlines were
# \r\r\n instead of \r\n.
filename = tmp_path / "test"
t = table.Table([["a", "b", "c"]], names=["col"])
ascii.write(t, filename, fast_writer=fast_writer)
with open(filename, newline="") as f:
content = f.read()
assert content == os.linesep.join(["col", "a", "b", "c"]) + os.linesep
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_csv_with_comments(fast_writer):
"""
Test fix for #7357 where writing a Table with comments to 'csv' fails with
a cryptic message. The comments are dropped by default, but when comment='#'
is supplied they are still written.
"""
out = StringIO()
t = table.Table([[1, 2], [3, 4]], names=["a", "b"])
t.meta["comments"] = ["hello"]
ascii.write(t, out, format="csv", fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["a,b", "1,3", "2,4"]
out = StringIO()
ascii.write(t, out, format="csv", fast_writer=fast_writer, comment="#")
assert out.getvalue().splitlines() == ["#hello", "a,b", "1,3", "2,4"]
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_formatted_mixin(fast_writer):
"""
Test fix for #8680 where writing a QTable with a quantity mixin generates
an exception if a format is specified.
"""
out = StringIO()
t = table.QTable([[1, 2], [1, 2] * u.m], names=["a", "b"])
ascii.write(t, out, fast_writer=fast_writer, formats={"a": "%02d", "b": "%.2f"})
assert out.getvalue().splitlines() == ["a b", "01 1.00", "02 2.00"]
def test_validate_write_kwargs():
out = StringIO()
t = table.QTable([[1, 2], [1, 2]], names=["a", "b"])
with pytest.raises(
TypeError,
match=r"write\(\) argument 'fast_writer' must be a "
r"\(<class 'bool'>, <class 'str'>\) object, "
r"got <class 'int'> instead",
):
ascii.write(t, out, fast_writer=12)
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_multidim_column_error(fmt_name_class):
"""
Test that trying to write a multidim column fails in every format except
ECSV.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, "_io_registry_can_write", True):
return
# Skip ecsv, and HTML when bs4 is missing. Latex and aastex are also skipped;
# see the comment in the Latex class in latex.py where max_ndim = None is defined.
if (fmt_name == "html" and not HAS_BS4) or fmt_name in ("ecsv", "latex", "aastex"):
return
out = StringIO()
t = table.Table()
t["a"] = np.arange(16).reshape(2, 2, 2, 2)
t["b"] = [1, 2]
fast = fmt_name in ascii.core.FAST_CLASSES
with pytest.raises(ValueError, match=r"column\(s\) with dimension"):
ascii.write(t, out, format=fmt_name, fast_writer=fast)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_as_columns(fast_writer):
"""
Test that writing a set of columns also roundtrips (as long as the
table does not have metadata, etc.)
"""
# Use masked in case that makes it more difficult.
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data["a"].mask = [True, False]
data["c"].mask = [False, True]
data = list(data.columns.values())
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
|
c4343435c7f3d1b788775955e510f334d85342310ca3b7e18e82215d8d1ab03a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from asdf.extension import AsdfExtension, BuiltinExtension
from asdf.util import filepath_to_url
# Make sure that all tag implementations are imported by the time we create
# the extension class so that _astropy_asdf_types is populated correctly. We
# could do this using __init__ files, except it causes pytest import errors in
# the case that asdf is not installed.
from .tags.coordinates.angle import * # noqa: F401, F403
from .tags.coordinates.earthlocation import * # noqa: F401, F403
from .tags.coordinates.frames import * # noqa: F401, F403
from .tags.coordinates.representation import * # noqa: F401, F403
from .tags.coordinates.skycoord import * # noqa: F401, F403
from .tags.coordinates.spectralcoord import * # noqa: F401, F403
from .tags.fits.fits import * # noqa: F401, F403
from .tags.table.table import * # noqa: F401, F403
from .tags.time.time import * # noqa: F401, F403
from .tags.time.timedelta import * # noqa: F401, F403
from .tags.transform.basic import * # noqa: F401, F403
from .tags.transform.compound import * # noqa: F401, F403
from .tags.transform.functional_models import * # noqa: F401, F403
from .tags.transform.math import * # noqa: F401, F403
from .tags.transform.physical_models import * # noqa: F401, F403
from .tags.transform.polynomial import * # noqa: F401, F403
from .tags.transform.powerlaws import * # noqa: F401, F403
from .tags.transform.projections import * # noqa: F401, F403
from .tags.transform.spline import * # noqa: F401, F403
from .tags.transform.tabular import * # noqa: F401, F403
from .tags.unit.equivalency import * # noqa: F401, F403
from .tags.unit.quantity import * # noqa: F401, F403
from .tags.unit.unit import * # noqa: F401, F403
from .types import _astropy_asdf_types, _astropy_types
__all__ = ["AstropyExtension", "AstropyAsdfExtension"]
ASTROPY_SCHEMA_URI_BASE = "http://astropy.org/schemas/"
SCHEMA_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), "data", "schemas")
)
ASTROPY_URL_MAPPING = [
(
ASTROPY_SCHEMA_URI_BASE,
filepath_to_url(os.path.join(SCHEMA_PATH, "astropy.org"))
+ "/{url_suffix}.yaml",
)
]
# This extension is used to register custom types that have both tags and
# schemas defined by Astropy.
class AstropyExtension(AsdfExtension):
@property
def types(self):
return _astropy_types
@property
def tag_mapping(self):
return [
("tag:astropy.org:astropy", ASTROPY_SCHEMA_URI_BASE + "astropy{tag_suffix}")
]
@property
def url_mapping(self):
return ASTROPY_URL_MAPPING
# This extension is used to register custom tag types that have schemas defined
# by ASDF, but have tag implementations defined in astropy.
class AstropyAsdfExtension(BuiltinExtension):
@property
def types(self):
return _astropy_asdf_types
|
9d999760768a81dadca101cb70f487b7924cc2a11215fb8fa46ec33a77dbcf4a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from astropy.table import Table # noqa: E402
def make_table():
a = [1, 4, 5]
b = [2.0, 5.0, 8.2]
c = ["x", "y", "z"]
return Table([a, b, c], names=("a", "b", "c"), meta={"name": "first table"})
def test_table_io(tmpdir):
tmpfile = str(tmpdir.join("table.asdf"))
table = make_table()
table.write(tmpfile)
# Simple sanity check using ASDF directly
with asdf.open(tmpfile) as af:
assert "data" in af.keys()
assert isinstance(af["data"], Table)
assert all(af["data"] == table)
# Now test using the table reader
new_t = Table.read(tmpfile)
assert all(new_t == table)
def test_table_io_custom_key(tmpdir):
tmpfile = str(tmpdir.join("table.asdf"))
table = make_table()
table.write(tmpfile, data_key="something")
# Simple sanity check using ASDF directly
with asdf.open(tmpfile) as af:
assert "something" in af.keys()
assert "data" not in af.keys()
assert isinstance(af["something"], Table)
assert all(af["something"] == table)
# Now test using the table reader
with pytest.raises(KeyError):
new_t = Table.read(tmpfile)
new_t = Table.read(tmpfile, data_key="something")
assert all(new_t == table)
def test_table_io_custom_tree(tmpdir):
tmpfile = str(tmpdir.join("table.asdf"))
table = make_table()
def make_custom_tree(tab):
return dict(foo=dict(bar=tab))
table.write(tmpfile, make_tree=make_custom_tree)
# Simple sanity check using ASDF directly
with asdf.open(tmpfile) as af:
assert "foo" in af.keys()
assert "bar" in af["foo"]
assert "data" not in af.keys()
assert all(af["foo"]["bar"] == table)
# Now test using table reader
with pytest.raises(KeyError):
new_t = Table.read(tmpfile)
def find_table(asdffile):
return asdffile["foo"]["bar"]
new_t = Table.read(tmpfile, find_table=find_table)
assert all(new_t == table)
|
b1a58ec2f48c4a295ce7631495aa42b2dee3847d3052865c052eeccf971ad580 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from astropy.modeling import math_functions
from astropy.modeling.math_functions import * # noqa: F401, F403
from astropy.modeling.math_functions import __all__ as math_classes
__all__ = ["NpUfuncType"]
class NpUfuncType(TransformType):
name = "transform/math_functions"
version = "1.0.0"
types = ["astropy.modeling.math_functions." + kl for kl in math_classes]
@classmethod
def from_tree_transform(cls, node, ctx):
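# The ASDF node stores only the ufunc name; map it back to the corresponding
# astropy math_functions model class and return a new instance.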
klass_name = math_functions._make_class_name(node["func_name"])
klass = getattr(math_functions, klass_name)
return klass()
@classmethod
def to_tree_transform(cls, model, ctx):
return {"func_name": model.func.__name__}
|
f408840388fda923b13d5de0cc3248bf44e48622b27cd6d447c3f9ff23a58109 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import numpy as np # noqa: E402
from asdf.tags.core.ndarray import NDArrayType # noqa: E402
from asdf.tests import helpers # noqa: E402
from packaging.version import Version # noqa: E402
import astropy.units as u # noqa: E402
from astropy import table # noqa: E402
from astropy.coordinates import EarthLocation, SkyCoord # noqa: E402
from astropy.coordinates.tests.helper import skycoord_equal # noqa: E402
from astropy.io.misc.asdf.tags.tests.helpers import ( # noqa: E402
run_schema_example_test,
)
from astropy.time import Time, TimeDelta # noqa: E402
def test_table(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"))
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 3
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_array_columns(tmpdir):
a = np.array(
[
([[1, 2], [3, 4]], 2.0, "x"),
([[5, 6], [7, 8]], 5.0, "y"),
([[9, 10], [11, 12]], 8.2, "z"),
],
dtype=[("a", "<i4", (2, 2)), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
assert t.columns["a"].shape == (3, 2, 2)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_structured_array_columns(tmpdir):
a = np.array(
[((1, "a"), 2.0, "x"), ((4, "b"), 5.0, "y"), ((5, "c"), 8.2, "z")],
dtype=[("a", [("a0", "<i4"), ("a1", "|S1")]), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_table_row_order(tmpdir):
a = np.array(
[(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")],
dtype=[("a", "<i4"), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_table_inline(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"))
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(list(ff.blocks.internal_blocks)) == 0
if Version(asdf.__version__) >= Version("2.8.0"):
# The auto_inline argument is deprecated as of asdf 2.8.0.
with asdf.config_context() as config:
config.array_inline_threshold = 64
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
else:
helpers.assert_roundtrip_tree(
{"table": t},
tmpdir,
asdf_check_func=check,
write_options={"auto_inline": 64},
)
def test_mismatched_columns():
yaml = """
table: !<tag:astropy.org:astropy/table/table-1.0.0>
columns:
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2]
name: a
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2, 3]
name: b
colnames: [a, b]
"""
buff = helpers.yaml_to_asdf(yaml)
with pytest.raises(ValueError) as err:
with asdf.open(buff):
pass
assert "Inconsistent data column lengths" in str(err.value)
def test_masked_table(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(
rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"), masked=True
)
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["a"].mask = [True, False, True]
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 4
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_quantity_mixin(tmpdir):
t = table.QTable()
t["a"] = [1, 2, 3]
t["b"] = ["x", "y", "z"]
t["c"] = [2.0, 5.0, 8.2] * u.m
def check(ff):
assert isinstance(ff["table"]["c"], u.Quantity)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_time_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"])
def check(ff):
assert isinstance(ff["table"]["c"], Time)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_timedelta_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = TimeDelta([1, 2] * u.day)
def check(ff):
assert isinstance(ff["table"]["c"], TimeDelta)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_skycoord_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = SkyCoord([1, 2], [3, 4], unit="deg,deg", frame="fk4", obstime="J1990.5")
def check(ff):
assert isinstance(ff["table"]["c"], SkyCoord)
def tree_match(old, new):
NDArrayType.assert_equal(new["a"], old["a"])
NDArrayType.assert_equal(new["b"], old["b"])
assert skycoord_equal(new["c"], old["c"])
helpers.assert_roundtrip_tree(
{"table": t}, tmpdir, asdf_check_func=check, tree_match_func=tree_match
)
def test_earthlocation_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
def check(ff):
assert isinstance(ff["table"]["c"], EarthLocation)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_ndarray_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = table.NdarrayMixin([5, 6])
helpers.assert_roundtrip_tree({"table": t}, tmpdir)
def test_backwards_compat():
"""
Make sure that we can continue to read tables that use the schema from
the ASDF Standard.
This test uses the examples in the table schema from the ASDF Standard,
since these make no reference to Astropy's own table definition.
"""
def check(asdffile):
assert isinstance(asdffile["example"], table.Table)
run_schema_example_test("stsci.edu", "asdf", "core/table", "1.0.0", check)
|
ceb2412f128c5ff9d2fdbd6c675138d04d5d92d3d726bbc4e5d286c9dcda0133 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import os # noqa: E402
import numpy as np # noqa: E402
from asdf.tests import helpers # noqa: E402
from astropy.io import fits # noqa: E402
from astropy.io.misc.asdf.tags.tests.helpers import ( # noqa: E402
run_schema_example_test,
)
def test_complex_structure(tmpdir):
with fits.open(
os.path.join(os.path.dirname(__file__), "data", "complex.fits"), memmap=False
) as hdulist:
tree = {"fits": hdulist}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fits_table(tmpdir):
a = np.array([(0, 1), (2, 3)], dtype=[("A", int), ("B", int)])
h = fits.HDUList()
h.append(fits.BinTableHDU.from_columns(a))
tree = {"fits": h}
def check_yaml(content):
assert b"!<tag:astropy.org:astropy/table/table-1.0.0>" in content
helpers.assert_roundtrip_tree(tree, tmpdir, raw_yaml_check_func=check_yaml)
def test_backwards_compat():
"""
Make sure that we can continue to read FITS HDUs that use the schema from
the ASDF Standard.
This test uses the examples in the fits schema from the ASDF Standard,
since these make no reference to Astropy's own fits definition.
"""
def check(asdffile):
assert isinstance(asdffile["example"], fits.HDUList)
run_schema_example_test("stsci.edu", "asdf", "fits/fits", "1.0.0", check)
|
f8512a80ee445ce986be049408c185fb296e6885a46bf3270632b966039c8123 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree # noqa: E402
from astropy import units as u # noqa: E402
from astropy.modeling.models import UnitsMapping # noqa: E402
def assert_model_roundtrip(model, tmpdir):
tree = {"model": model}
assert_roundtrip_tree(tree, tmpdir, tree_match_func=assert_models_equal)
def assert_models_equal(a, b):
assert a.name == b.name
assert a.inputs == b.inputs
assert a.input_units == b.input_units
assert a.outputs == b.outputs
assert a.mapping == b.mapping
assert a.input_units_allow_dimensionless == b.input_units_allow_dimensionless
for i in a.inputs:
if a.input_units_equivalencies is None:
a_equiv = None
else:
a_equiv = a.input_units_equivalencies.get(i)
if b.input_units_equivalencies is None:
b_equiv = None
else:
b_equiv = b.input_units_equivalencies.get(i, None)
assert a_equiv == b_equiv
def test_basic(tmpdir):
m = UnitsMapping(((u.m, u.dimensionless_unscaled),))
assert_model_roundtrip(m, tmpdir)
def test_remove_units(tmpdir):
m = UnitsMapping(((u.m, None),))
assert_model_roundtrip(m, tmpdir)
def test_accept_any_units(tmpdir):
m = UnitsMapping(((None, u.m),))
assert_model_roundtrip(m, tmpdir)
def test_with_equivalencies(tmpdir):
m = UnitsMapping(
((u.m, u.dimensionless_unscaled),),
input_units_equivalencies={"x": u.equivalencies.spectral()},
)
assert_model_roundtrip(m, tmpdir)
def test_with_allow_dimensionless(tmpdir):
m = UnitsMapping(
((u.m, u.dimensionless_unscaled), (u.s, u.Hz)),
input_units_allow_dimensionless=True,
)
assert_model_roundtrip(m, tmpdir)
m = UnitsMapping(
((u.m, u.dimensionless_unscaled), (u.s, u.Hz)),
input_units_allow_dimensionless={"x0": True, "x1": False},
)
assert_model_roundtrip(m, tmpdir)
|
4247784a5195f5f845fe3c66bd1f6b3f809d2d6afc00d921fb4517495da5bb63 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import warnings # noqa: E402
import asdf # noqa: E402
import numpy as np # noqa: E402
from asdf import AsdfFile, util # noqa: E402
from asdf.tests import helpers # noqa: E402
from packaging.version import Version # noqa: E402
import astropy.units as u # noqa: E402
from astropy.modeling import models as astmodels # noqa: E402
from astropy.modeling.core import fix_inputs # noqa: E402
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: E402
def custom_and_analytical_inverse():
p1 = astmodels.Polynomial1D(1)
p2 = astmodels.Polynomial1D(1)
p3 = astmodels.Polynomial1D(1)
p4 = astmodels.Polynomial1D(1)
m1 = p1 & p2
m2 = p3 & p4
m1.inverse = m2
return m1
def custom_inputs_outputs():
m = astmodels.Gaussian2D()
m.inputs = ("a", "b")
m.outputs = ("c",)
return m
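# Representative instances of the serializable model classes; each one is
# round-tripped through ASDF in test_single_model below.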
test_models = [
astmodels.Identity(2),
astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Shift(2.0),
astmodels.Hermite1D(2, c0=2, c1=3, c2=0.5),
astmodels.Legendre1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Legendre2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Hermite2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Scale(3.4),
astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.Multiply(3),
astmodels.Multiply(10 * u.m),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order="xzx"),
astmodels.Mapping((0, 1), n_inputs=3),
astmodels.Shift(2.0 * u.deg),
astmodels.Scale(3.4 * u.deg),
astmodels.RotateNative2Celestial(5.63 * u.deg, -72.5 * u.deg, 180 * u.deg),
astmodels.RotateCelestial2Native(5.63 * u.deg, -72.5 * u.deg, 180 * u.deg),
astmodels.RotationSequence3D([1.2, 2.3, 3.4, 0.3], "xyzx"),
astmodels.SphericalRotationSequence([1.2, 2.3, 3.4, 0.3], "xyzy"),
astmodels.AiryDisk2D(amplitude=10.0, x_0=0.5, y_0=1.5),
astmodels.Box1D(amplitude=10.0, x_0=0.5, width=5.0),
astmodels.Box2D(amplitude=10.0, x_0=0.5, x_width=5.0, y_0=1.5, y_width=7.0),
astmodels.Const1D(amplitude=5.0),
astmodels.Const2D(amplitude=5.0),
astmodels.Disk2D(amplitude=10.0, x_0=0.5, y_0=1.5, R_0=5.0),
astmodels.Ellipse2D(amplitude=10.0, x_0=0.5, y_0=1.5, a=2.0, b=4.0, theta=0.1),
astmodels.Exponential1D(amplitude=10.0, tau=3.5),
astmodels.Gaussian1D(amplitude=10.0, mean=5.0, stddev=3.0),
astmodels.Gaussian2D(
amplitude=10.0, x_mean=5.0, y_mean=5.0, x_stddev=3.0, y_stddev=3.0
),
astmodels.KingProjectedAnalytic1D(amplitude=10.0, r_core=5.0, r_tide=2.0),
astmodels.Logarithmic1D(amplitude=10.0, tau=3.5),
astmodels.Lorentz1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
astmodels.Moffat1D(amplitude=10.0, x_0=0.5, gamma=1.2, alpha=2.5),
astmodels.Moffat2D(amplitude=10.0, x_0=0.5, y_0=1.5, gamma=1.2, alpha=2.5),
astmodels.Planar2D(slope_x=0.5, slope_y=1.2, intercept=2.5),
astmodels.RedshiftScaleFactor(z=2.5),
astmodels.RickerWavelet1D(amplitude=10.0, x_0=0.5, sigma=1.2),
astmodels.RickerWavelet2D(amplitude=10.0, x_0=0.5, y_0=1.5, sigma=1.2),
astmodels.Ring2D(amplitude=10.0, x_0=0.5, y_0=1.5, r_in=5.0, width=10.0),
astmodels.Sersic1D(amplitude=10.0, r_eff=1.0, n=4.0),
astmodels.Sersic2D(
amplitude=10.0, r_eff=1.0, n=4.0, x_0=0.5, y_0=1.5, ellip=0.0, theta=0.0
),
astmodels.Sine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.Cosine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.Tangent1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.ArcSine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.ArcCosine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.ArcTangent1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.Trapezoid1D(amplitude=10.0, x_0=0.5, width=5.0, slope=1.0),
astmodels.TrapezoidDisk2D(amplitude=10.0, x_0=0.5, y_0=1.5, R_0=5.0, slope=1.0),
astmodels.Voigt1D(x_0=0.55, amplitude_L=10.0, fwhm_L=0.5, fwhm_G=0.9),
astmodels.BlackBody(scale=10.0, temperature=6000.0 * u.K),
astmodels.Drude1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
astmodels.Plummer1D(mass=10.0, r_plum=5.0),
astmodels.BrokenPowerLaw1D(amplitude=10, x_break=0.5, alpha_1=2.0, alpha_2=3.5),
astmodels.ExponentialCutoffPowerLaw1D(10, 0.5, 2.0, 7.0),
astmodels.LogParabola1D(
amplitude=10,
x_0=0.5,
alpha=2.0,
beta=3.0,
),
astmodels.PowerLaw1D(amplitude=10.0, x_0=0.5, alpha=2.0),
astmodels.SmoothlyBrokenPowerLaw1D(
amplitude=10.0, x_break=5.0, alpha_1=2.0, alpha_2=3.0, delta=0.5
),
custom_and_analytical_inverse(),
custom_inputs_outputs(),
]
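# Spline1D depends on scipy, so it is only included when scipy is available.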
if HAS_SCIPY:
test_models.append(
astmodels.Spline1D(
np.array([-3.0, -3.0, -3.0, -3.0, -1.0, 0.0, 1.0, 3.0, 3.0, 3.0, 3.0]),
np.array(
[
0.10412331,
0.07013616,
-0.18799552,
1.35953147,
-0.15282581,
0.03923,
-0.04297299,
0.0,
0.0,
0.0,
0.0,
]
),
3,
)
)
math_models = []
for kl in astmodels.math.__all__:
klass = getattr(astmodels.math, kl)
math_models.append(klass())
test_models.extend(math_models)
test_models_with_constraints = [
astmodels.Legendre2D(
x_degree=1,
y_degree=1,
c0_0=1,
c0_1=2,
c1_0=3,
fixed={"c1_0": True, "c0_1": True},
bounds={"c0_0": (-10, 10)},
)
]
test_models.extend(test_models_with_constraints)
def test_transforms_compound(tmpdir):
tree = {
"compound": astmodels.Shift(1) & astmodels.Shift(2)
| astmodels.Sky2Pix_TAN()
| astmodels.Rotation2D()
| astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32])
+ astmodels.Rotation2D(32)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_inverse_transforms(tmpdir):
rotation = astmodels.Rotation2D(32)
rotation.inverse = astmodels.Rotation2D(45)
real_rotation = astmodels.Rotation2D(32)
tree = {"rotation": rotation, "real_rotation": real_rotation}
def check(ff):
assert ff.tree["rotation"].inverse.angle == 45
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
@pytest.mark.parametrize("model", test_models)
def test_single_model(tmpdir, model):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.6.0 which causes warnings
if Version(asdf.__version__) <= Version("2.6.0"):
warnings.filterwarnings("ignore", "Unable to locate schema file")
tree = {"single_model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
def check(ff):
assert ff.tree["rot"].name == "foo"
tree = {"rot": astmodels.Rotation2D(23, name="foo")}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_zenithal_with_arguments(tmpdir):
tree = {"azp": astmodels.Sky2Pix_AZP(0.5, 0.3)}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_naming_of_compound_model(tmpdir):
"""Issue #87"""
def asdf_check(ff):
assert ff.tree["model"].name == "compound_model"
offx = astmodels.Shift(1)
scl = astmodels.Scale(2)
model = (offx | scl).rename("compound_model")
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=asdf_check)
@pytest.mark.slow
def test_generic_projections(tmpdir):
from astropy.io.misc.asdf.tags.transform import projections
for tag_name, (name, params, version) in projections._generic_projections.items():
tree = {
"forward": util.resolve_name(
f"astropy.modeling.projections.Sky2Pix_{name}"
)(),
"backward": util.resolve_name(
f"astropy.modeling.projections.Pix2Sky_{name}"
)(),
}
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.4.2 which causes warnings
if Version(asdf.__version__) <= Version("2.5.1"):
warnings.filterwarnings("ignore", "Unable to locate schema file")
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
points = np.arange(0, 5)
values = [1.0, 10, 2, 45, -3]
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 0.0]])
points = ([1, 2, 3], [1, 2, 3])
model2 = astmodels.Tabular2D(
points,
lookup_table=table,
bounds_error=False,
fill_value=None,
method="nearest",
)
tree = {"model": model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_bounding_box(tmpdir):
model = astmodels.Shift(1) & astmodels.Shift(2)
model.bounding_box = ((1, 3), (2, 4))
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
def test_const1d(tmpdir, standard_version):
helpers.assert_roundtrip_tree(
{"model": astmodels.Const1D(amplitude=5.0)},
tmpdir,
init_options={"version": standard_version},
)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
@pytest.mark.parametrize(
"model",
[
astmodels.Polynomial1D(1, c0=5, c1=17),
astmodels.Polynomial1D(1, c0=5, c1=17, domain=[-5, 4], window=[-2, 3]),
astmodels.Polynomial2D(2, c0_0=3, c1_0=5, c0_1=7),
astmodels.Polynomial2D(
2,
c0_0=3,
c1_0=5,
c0_1=7,
x_domain=[-2, 2],
y_domain=[-4, 4],
x_window=[-6, 6],
y_window=[-8, 8],
),
],
)
def test_polynomial(tmpdir, standard_version, model):
helpers.assert_roundtrip_tree(
{"model": model}, tmpdir, init_options={"version": standard_version}
)
def test_domain_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5, domain=[-2, 2])
model2d = astmodels.Chebyshev2D(
1, 1, c0_0=1, c0_1=2, c1_0=3, x_domain=[-2, 2], y_domain=[-2, 2]
)
fa = AsdfFile()
fa.tree["model1d"] = model1d
fa.tree["model2d"] = model2d
file_path = str(tmpdir.join("orthopoly_domain.asdf"))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree["model1d"](1.8) == model1d(1.8)
assert f.tree["model2d"](1.8, -1.5) == model2d(1.8, -1.5)
def test_window_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(
2, c0=2, c1=3, c2=0.5, domain=[-2, 2], window=[-0.5, 0.5]
)
model2d = astmodels.Chebyshev2D(
1,
1,
c0_0=1,
c0_1=2,
c1_0=3,
x_domain=[-2, 2],
y_domain=[-2, 2],
x_window=[-0.5, 0.5],
y_window=[-0.1, 0.5],
)
fa = AsdfFile()
fa.tree["model1d"] = model1d
fa.tree["model2d"] = model2d
file_path = str(tmpdir.join("orthopoly_window.asdf"))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree["model1d"](1.8) == model1d(1.8)
assert f.tree["model2d"](1.8, -1.5) == model2d(1.8, -1.5)
def test_linear1d(tmpdir):
model = astmodels.Linear1D()
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_linear1d_quantity(tmpdir):
model = astmodels.Linear1D(1 * u.nm, 1 * (u.nm / u.pixel))
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model_units(tmpdir):
points = np.arange(0, 5) * u.pix
values = [1.0, 10, 2, 45, -3] * u.nm
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 0.0]]) * u.nm
points = ([1, 2, 3], [1, 2, 3]) * u.pix
model2 = astmodels.Tabular2D(
points,
lookup_table=table,
bounds_error=False,
fill_value=None,
method="nearest",
)
tree = {"model": model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs(tmpdir):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.4.2 which causes warnings
if Version(asdf.__version__) <= Version("2.5.1"):
warnings.filterwarnings("ignore", "Unable to locate schema file")
model0 = astmodels.Pix2Sky_TAN()
model0.input_units_equivalencies = {
"x": u.dimensionless_angles(),
"y": u.dimensionless_angles(),
}
model1 = astmodels.Rotation2D()
model = model0 | model1
tree = {
"compound": fix_inputs(model, {"x": 45}),
"compound1": fix_inputs(model, {0: 45}),
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs_type(tmpdir):
with pytest.raises(TypeError):
tree = {"compound": fix_inputs(3, {"x": 45})}
helpers.assert_roundtrip_tree(tree, tmpdir)
with pytest.raises(AttributeError):
tree = {"compound": astmodels.Pix2Sky_TAN() & {"x": 45}}
helpers.assert_roundtrip_tree(tree, tmpdir)
comp_model = custom_and_analytical_inverse()
@pytest.mark.parametrize(
"model",
[
astmodels.Shift(1) & astmodels.Shift(2) | comp_model,
comp_model | astmodels.Shift(1) & astmodels.Shift(2),
astmodels.Shift(1) & comp_model,
comp_model & astmodels.Shift(1),
],
)
def test_custom_and_analytical(model, tmpdir):
fa = AsdfFile()
fa.tree["model"] = model
file_path = str(tmpdir.join("custom_and_analytical_inverse.asdf"))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree["model"].inverse is not None
def test_deserialize_compound_user_inverse(tmpdir):
"""
Confirm that we are able to correctly reconstruct a
compound model with a user inverse set on one of its
component models.
Due to code in TransformType that facilitates circular
inverses, the user inverse of the component model is
not available at the time that the CompoundModel is
constructed.
"""
yaml = """
model: !transform/concatenate-1.2.0
forward:
- !transform/shift-1.2.0
inverse: !transform/shift-1.2.0 {offset: 5.0}
offset: -10.0
- !transform/shift-1.2.0 {offset: -20.0}
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as af:
model = af["model"]
assert model.has_inverse()
assert model.inverse(-5, -20) == (0, 0)
# test some models and compound models with some input unit equivalencies
def models_with_input_eq():
# 1D model
m1 = astmodels.Shift(1 * u.kg)
m1.input_units_equivalencies = {"x": u.mass_energy()}
# 2D model
m2 = astmodels.Const2D(10 * u.Hz)
m2.input_units_equivalencies = {
"x": u.dimensionless_angles(),
"y": u.dimensionless_angles(),
}
# 2D model with only one input equivalencies
m3 = astmodels.Const2D(10 * u.Hz)
m3.input_units_equivalencies = {"x": u.dimensionless_angles()}
# model using equivalency that has args using units
m4 = astmodels.PowerLaw1D(amplitude=1 * u.m, x_0=10 * u.pix, alpha=7)
m4.input_units_equivalencies = {
"x": u.equivalencies.pixel_scale(0.5 * u.arcsec / u.pix)
}
return [m1, m2, m3, m4]
def compound_models_with_input_eq():
m1 = astmodels.Gaussian1D(10 * u.K, 11 * u.arcsec, 12 * u.arcsec)
m1.input_units_equivalencies = {"x": u.parallax()}
m2 = astmodels.Gaussian1D(5 * u.s, 2 * u.K, 3 * u.K)
m2.input_units_equivalencies = {"x": u.temperature()}
return [m1 | m2, m1 & m2, m1 + m2]
test_models.extend(models_with_input_eq())
test_models.extend(compound_models_with_input_eq())
|